Commit 6beb000923882f6204ea2cfcd932e568e900803f

Authored by Thomas Gleixner
1 parent 156171c71a

locking: Make inlining decision Kconfig based

commit 892a7c67 (locking: Allow arch-inlined spinlocks) implements the
selection of which lock functions are inlined based on defines in
arch/.../spinlock.h: #define __always_inline__LOCK_FUNCTION

Despite the name __always_inline__*, the lock functions can be built
out of line depending on config options. Also if the arch does not set
some inline defines the generic code might set them; again depending on
config options.

This makes it unnecessarily hard to figure out when and which lock
functions are inlined. Aside from that, it makes it far harder and
messier for -rt to manipulate the lock functions.

Convert the inlining decision to CONFIG switches. Each lock function
is inlined depending on CONFIG_INLINE_*. The configs implement the
existing dependencies. The architecture code can select ARCH_INLINE_*
to signal that it wants the corresponding lock function inlined.
ARCH_INLINE_* is necessary as Kconfig ignores "depends on"
restrictions when a config element is selected.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <20091109151428.504477141@linutronix.de>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>

Showing 6 changed files with 284 additions and 104 deletions Side-by-side Diff

... ... @@ -95,6 +95,34 @@
95 95 select HAVE_ARCH_TRACEHOOK
96 96 select INIT_ALL_POSSIBLE
97 97 select HAVE_PERF_EVENTS
  98 + select ARCH_INLINE_SPIN_TRYLOCK
  99 + select ARCH_INLINE_SPIN_TRYLOCK_BH
  100 + select ARCH_INLINE_SPIN_LOCK
  101 + select ARCH_INLINE_SPIN_LOCK_BH
  102 + select ARCH_INLINE_SPIN_LOCK_IRQ
  103 + select ARCH_INLINE_SPIN_LOCK_IRQSAVE
  104 + select ARCH_INLINE_SPIN_UNLOCK
  105 + select ARCH_INLINE_SPIN_UNLOCK_BH
  106 + select ARCH_INLINE_SPIN_UNLOCK_IRQ
  107 + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
  108 + select ARCH_INLINE_READ_TRYLOCK
  109 + select ARCH_INLINE_READ_LOCK
  110 + select ARCH_INLINE_READ_LOCK_BH
  111 + select ARCH_INLINE_READ_LOCK_IRQ
  112 + select ARCH_INLINE_READ_LOCK_IRQSAVE
  113 + select ARCH_INLINE_READ_UNLOCK
  114 + select ARCH_INLINE_READ_UNLOCK_BH
  115 + select ARCH_INLINE_READ_UNLOCK_IRQ
  116 + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
  117 + select ARCH_INLINE_WRITE_TRYLOCK
  118 + select ARCH_INLINE_WRITE_LOCK
  119 + select ARCH_INLINE_WRITE_LOCK_BH
  120 + select ARCH_INLINE_WRITE_LOCK_IRQ
  121 + select ARCH_INLINE_WRITE_LOCK_IRQSAVE
  122 + select ARCH_INLINE_WRITE_UNLOCK
  123 + select ARCH_INLINE_WRITE_UNLOCK_BH
  124 + select ARCH_INLINE_WRITE_UNLOCK_IRQ
  125 + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
98 126  
99 127 config SCHED_OMIT_FRAME_POINTER
100 128 bool
arch/s390/include/asm/spinlock.h
... ... @@ -191,34 +191,5 @@
191 191 #define _raw_read_relax(lock) cpu_relax()
192 192 #define _raw_write_relax(lock) cpu_relax()
193 193  
194   -#define __always_inline__spin_lock
195   -#define __always_inline__read_lock
196   -#define __always_inline__write_lock
197   -#define __always_inline__spin_lock_bh
198   -#define __always_inline__read_lock_bh
199   -#define __always_inline__write_lock_bh
200   -#define __always_inline__spin_lock_irq
201   -#define __always_inline__read_lock_irq
202   -#define __always_inline__write_lock_irq
203   -#define __always_inline__spin_lock_irqsave
204   -#define __always_inline__read_lock_irqsave
205   -#define __always_inline__write_lock_irqsave
206   -#define __always_inline__spin_trylock
207   -#define __always_inline__read_trylock
208   -#define __always_inline__write_trylock
209   -#define __always_inline__spin_trylock_bh
210   -#define __always_inline__spin_unlock
211   -#define __always_inline__read_unlock
212   -#define __always_inline__write_unlock
213   -#define __always_inline__spin_unlock_bh
214   -#define __always_inline__read_unlock_bh
215   -#define __always_inline__write_unlock_bh
216   -#define __always_inline__spin_unlock_irq
217   -#define __always_inline__read_unlock_irq
218   -#define __always_inline__write_unlock_irq
219   -#define __always_inline__spin_unlock_irqrestore
220   -#define __always_inline__read_unlock_irqrestore
221   -#define __always_inline__write_unlock_irqrestore
222   -
223 194 #endif /* __ASM_SPINLOCK_H */
include/linux/spinlock_api_smp.h
... ... @@ -60,136 +60,117 @@
60 60 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 61 __releases(lock);
62 62  
63   -/*
64   - * We inline the unlock functions in the nondebug case:
65   - */
66   -#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
67   -#define __always_inline__spin_unlock
68   -#define __always_inline__read_unlock
69   -#define __always_inline__write_unlock
70   -#define __always_inline__spin_unlock_irq
71   -#define __always_inline__read_unlock_irq
72   -#define __always_inline__write_unlock_irq
73   -#endif
74   -
75   -#ifndef CONFIG_DEBUG_SPINLOCK
76   -#ifndef CONFIG_GENERIC_LOCKBREAK
77   -
78   -#ifdef __always_inline__spin_lock
  63 +#ifdef CONFIG_INLINE_SPIN_LOCK
79 64 #define _spin_lock(lock) __spin_lock(lock)
80 65 #endif
81 66  
82   -#ifdef __always_inline__read_lock
  67 +#ifdef CONFIG_INLINE_READ_LOCK
83 68 #define _read_lock(lock) __read_lock(lock)
84 69 #endif
85 70  
86   -#ifdef __always_inline__write_lock
  71 +#ifdef CONFIG_INLINE_WRITE_LOCK
87 72 #define _write_lock(lock) __write_lock(lock)
88 73 #endif
89 74  
90   -#ifdef __always_inline__spin_lock_bh
  75 +#ifdef CONFIG_INLINE_SPIN_LOCK_BH
91 76 #define _spin_lock_bh(lock) __spin_lock_bh(lock)
92 77 #endif
93 78  
94   -#ifdef __always_inline__read_lock_bh
  79 +#ifdef CONFIG_INLINE_READ_LOCK_BH
95 80 #define _read_lock_bh(lock) __read_lock_bh(lock)
96 81 #endif
97 82  
98   -#ifdef __always_inline__write_lock_bh
  83 +#ifdef CONFIG_INLINE_WRITE_LOCK_BH
99 84 #define _write_lock_bh(lock) __write_lock_bh(lock)
100 85 #endif
101 86  
102   -#ifdef __always_inline__spin_lock_irq
  87 +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
103 88 #define _spin_lock_irq(lock) __spin_lock_irq(lock)
104 89 #endif
105 90  
106   -#ifdef __always_inline__read_lock_irq
  91 +#ifdef CONFIG_INLINE_READ_LOCK_IRQ
107 92 #define _read_lock_irq(lock) __read_lock_irq(lock)
108 93 #endif
109 94  
110   -#ifdef __always_inline__write_lock_irq
  95 +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
111 96 #define _write_lock_irq(lock) __write_lock_irq(lock)
112 97 #endif
113 98  
114   -#ifdef __always_inline__spin_lock_irqsave
  99 +#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
115 100 #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
116 101 #endif
117 102  
118   -#ifdef __always_inline__read_lock_irqsave
  103 +#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
119 104 #define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
120 105 #endif
121 106  
122   -#ifdef __always_inline__write_lock_irqsave
  107 +#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
123 108 #define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
124 109 #endif
125 110  
126   -#endif /* !CONFIG_GENERIC_LOCKBREAK */
127   -
128   -#ifdef __always_inline__spin_trylock
  111 +#ifdef CONFIG_INLINE_SPIN_TRYLOCK
129 112 #define _spin_trylock(lock) __spin_trylock(lock)
130 113 #endif
131 114  
132   -#ifdef __always_inline__read_trylock
  115 +#ifdef CONFIG_INLINE_READ_TRYLOCK
133 116 #define _read_trylock(lock) __read_trylock(lock)
134 117 #endif
135 118  
136   -#ifdef __always_inline__write_trylock
  119 +#ifdef CONFIG_INLINE_WRITE_TRYLOCK
137 120 #define _write_trylock(lock) __write_trylock(lock)
138 121 #endif
139 122  
140   -#ifdef __always_inline__spin_trylock_bh
  123 +#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
141 124 #define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
142 125 #endif
143 126  
144   -#ifdef __always_inline__spin_unlock
  127 +#ifdef CONFIG_INLINE_SPIN_UNLOCK
145 128 #define _spin_unlock(lock) __spin_unlock(lock)
146 129 #endif
147 130  
148   -#ifdef __always_inline__read_unlock
  131 +#ifdef CONFIG_INLINE_READ_UNLOCK
149 132 #define _read_unlock(lock) __read_unlock(lock)
150 133 #endif
151 134  
152   -#ifdef __always_inline__write_unlock
  135 +#ifdef CONFIG_INLINE_WRITE_UNLOCK
153 136 #define _write_unlock(lock) __write_unlock(lock)
154 137 #endif
155 138  
156   -#ifdef __always_inline__spin_unlock_bh
  139 +#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
157 140 #define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
158 141 #endif
159 142  
160   -#ifdef __always_inline__read_unlock_bh
  143 +#ifdef CONFIG_INLINE_READ_UNLOCK_BH
161 144 #define _read_unlock_bh(lock) __read_unlock_bh(lock)
162 145 #endif
163 146  
164   -#ifdef __always_inline__write_unlock_bh
  147 +#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
165 148 #define _write_unlock_bh(lock) __write_unlock_bh(lock)
166 149 #endif
167 150  
168   -#ifdef __always_inline__spin_unlock_irq
  151 +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
169 152 #define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
170 153 #endif
171 154  
172   -#ifdef __always_inline__read_unlock_irq
  155 +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
173 156 #define _read_unlock_irq(lock) __read_unlock_irq(lock)
174 157 #endif
175 158  
176   -#ifdef __always_inline__write_unlock_irq
  159 +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
177 160 #define _write_unlock_irq(lock) __write_unlock_irq(lock)
178 161 #endif
179 162  
180   -#ifdef __always_inline__spin_unlock_irqrestore
  163 +#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
181 164 #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
182 165 #endif
183 166  
184   -#ifdef __always_inline__read_unlock_irqrestore
  167 +#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
185 168 #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
186 169 #endif
187 170  
188   -#ifdef __always_inline__write_unlock_irqrestore
  171 +#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
189 172 #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
190 173 #endif
191   -
192   -#endif /* CONFIG_DEBUG_SPINLOCK */
193 174  
194 175 static inline int __spin_trylock(spinlock_t *lock)
195 176 {
... ... @@ -1209,4 +1209,6 @@
1209 1209  
1210 1210 config PREEMPT_NOTIFIERS
1211 1211 bool
  1212 +
  1213 +source "kernel/Kconfig.locks"
kernel/Kconfig.locks
  1 +#
  2 +# The ARCH_INLINE foo is necessary because select ignores "depends on"
  3 +#
  4 +config ARCH_INLINE_SPIN_TRYLOCK
  5 + bool
  6 +
  7 +config ARCH_INLINE_SPIN_TRYLOCK_BH
  8 + bool
  9 +
  10 +config ARCH_INLINE_SPIN_LOCK
  11 + bool
  12 +
  13 +config ARCH_INLINE_SPIN_LOCK_BH
  14 + bool
  15 +
  16 +config ARCH_INLINE_SPIN_LOCK_IRQ
  17 + bool
  18 +
  19 +config ARCH_INLINE_SPIN_LOCK_IRQSAVE
  20 + bool
  21 +
  22 +config ARCH_INLINE_SPIN_UNLOCK
  23 + bool
  24 +
  25 +config ARCH_INLINE_SPIN_UNLOCK_BH
  26 + bool
  27 +
  28 +config ARCH_INLINE_SPIN_UNLOCK_IRQ
  29 + bool
  30 +
  31 +config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
  32 + bool
  33 +
  34 +
  35 +config ARCH_INLINE_READ_TRYLOCK
  36 + bool
  37 +
  38 +config ARCH_INLINE_READ_LOCK
  39 + bool
  40 +
  41 +config ARCH_INLINE_READ_LOCK_BH
  42 + bool
  43 +
  44 +config ARCH_INLINE_READ_LOCK_IRQ
  45 + bool
  46 +
  47 +config ARCH_INLINE_READ_LOCK_IRQSAVE
  48 + bool
  49 +
  50 +config ARCH_INLINE_READ_UNLOCK
  51 + bool
  52 +
  53 +config ARCH_INLINE_READ_UNLOCK_BH
  54 + bool
  55 +
  56 +config ARCH_INLINE_READ_UNLOCK_IRQ
  57 + bool
  58 +
  59 +config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
  60 + bool
  61 +
  62 +
  63 +config ARCH_INLINE_WRITE_TRYLOCK
  64 + bool
  65 +
  66 +config ARCH_INLINE_WRITE_LOCK
  67 + bool
  68 +
  69 +config ARCH_INLINE_WRITE_LOCK_BH
  70 + bool
  71 +
  72 +config ARCH_INLINE_WRITE_LOCK_IRQ
  73 + bool
  74 +
  75 +config ARCH_INLINE_WRITE_LOCK_IRQSAVE
  76 + bool
  77 +
  78 +config ARCH_INLINE_WRITE_UNLOCK
  79 + bool
  80 +
  81 +config ARCH_INLINE_WRITE_UNLOCK_BH
  82 + bool
  83 +
  84 +config ARCH_INLINE_WRITE_UNLOCK_IRQ
  85 + bool
  86 +
  87 +config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
  88 + bool
  89 +
  90 +#
  91 +# lock_* functions are inlined when:
  92 +# - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
  93 +#
  94 +# trylock_* functions are inlined when:
  95 +# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
  96 +#
  97 +# unlock and unlock_irq functions are inlined when:
  98 +# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
  99 +# or
  100 +# - DEBUG_SPINLOCK=n and PREEMPT=n
  101 +#
  102 +# unlock_bh and unlock_irqrestore functions are inlined when:
  103 +# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
  104 +#
  105 +
  106 +config INLINE_SPIN_TRYLOCK
  107 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
  108 +
  109 +config INLINE_SPIN_TRYLOCK_BH
  110 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
  111 +
  112 +config INLINE_SPIN_LOCK
  113 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
  114 +
  115 +config INLINE_SPIN_LOCK_BH
  116 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  117 + ARCH_INLINE_SPIN_LOCK_BH
  118 +
  119 +config INLINE_SPIN_LOCK_IRQ
  120 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  121 + ARCH_INLINE_SPIN_LOCK_IRQ
  122 +
  123 +config INLINE_SPIN_LOCK_IRQSAVE
  124 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  125 + ARCH_INLINE_SPIN_LOCK_IRQSAVE
  126 +
  127 +config INLINE_SPIN_UNLOCK
  128 + def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
  129 +
  130 +config INLINE_SPIN_UNLOCK_BH
  131 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
  132 +
  133 +config INLINE_SPIN_UNLOCK_IRQ
  134 + def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ)
  135 +
  136 +config INLINE_SPIN_UNLOCK_IRQRESTORE
  137 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
  138 +
  139 +
  140 +config INLINE_READ_TRYLOCK
  141 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
  142 +
  143 +config INLINE_READ_LOCK
  144 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
  145 +
  146 +config INLINE_READ_LOCK_BH
  147 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  148 + ARCH_INLINE_READ_LOCK_BH
  149 +
  150 +config INLINE_READ_LOCK_IRQ
  151 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  152 + ARCH_INLINE_READ_LOCK_IRQ
  153 +
  154 +config INLINE_READ_LOCK_IRQSAVE
  155 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  156 + ARCH_INLINE_READ_LOCK_IRQSAVE
  157 +
  158 +config INLINE_READ_UNLOCK
  159 + def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
  160 +
  161 +config INLINE_READ_UNLOCK_BH
  162 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
  163 +
  164 +config INLINE_READ_UNLOCK_IRQ
  165 + def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ)
  166 +
  167 +config INLINE_READ_UNLOCK_IRQRESTORE
  168 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
  169 +
  170 +
  171 +config INLINE_WRITE_TRYLOCK
  172 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
  173 +
  174 +config INLINE_WRITE_LOCK
  175 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
  176 +
  177 +config INLINE_WRITE_LOCK_BH
  178 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  179 + ARCH_INLINE_WRITE_LOCK_BH
  180 +
  181 +config INLINE_WRITE_LOCK_IRQ
  182 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  183 + ARCH_INLINE_WRITE_LOCK_IRQ
  184 +
  185 +config INLINE_WRITE_LOCK_IRQSAVE
  186 + def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
  187 + ARCH_INLINE_WRITE_LOCK_IRQSAVE
  188 +
  189 +config INLINE_WRITE_UNLOCK
  190 + def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
  191 +
  192 +config INLINE_WRITE_UNLOCK_BH
  193 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
  194 +
  195 +config INLINE_WRITE_UNLOCK_IRQ
  196 + def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ)
  197 +
  198 +config INLINE_WRITE_UNLOCK_IRQRESTORE
  199 + def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
... ... @@ -21,7 +21,7 @@
21 21 #include <linux/debug_locks.h>
22 22 #include <linux/module.h>
23 23  
24   -#ifndef _spin_trylock
  24 +#ifndef CONFIG_INLINE_SPIN_TRYLOCK
25 25 int __lockfunc _spin_trylock(spinlock_t *lock)
26 26 {
27 27 return __spin_trylock(lock);
... ... @@ -29,7 +29,7 @@
29 29 EXPORT_SYMBOL(_spin_trylock);
30 30 #endif
31 31  
32   -#ifndef _read_trylock
  32 +#ifndef CONFIG_INLINE_READ_TRYLOCK
33 33 int __lockfunc _read_trylock(rwlock_t *lock)
34 34 {
35 35 return __read_trylock(lock);
... ... @@ -37,7 +37,7 @@
37 37 EXPORT_SYMBOL(_read_trylock);
38 38 #endif
39 39  
40   -#ifndef _write_trylock
  40 +#ifndef CONFIG_INLINE_WRITE_TRYLOCK
41 41 int __lockfunc _write_trylock(rwlock_t *lock)
42 42 {
43 43 return __write_trylock(lock);
... ... @@ -52,7 +52,7 @@
52 52 */
53 53 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
54 54  
55   -#ifndef _read_lock
  55 +#ifndef CONFIG_INLINE_READ_LOCK
56 56 void __lockfunc _read_lock(rwlock_t *lock)
57 57 {
58 58 __read_lock(lock);
... ... @@ -60,7 +60,7 @@
60 60 EXPORT_SYMBOL(_read_lock);
61 61 #endif
62 62  
63   -#ifndef _spin_lock_irqsave
  63 +#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
64 64 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
65 65 {
66 66 return __spin_lock_irqsave(lock);
... ... @@ -68,7 +68,7 @@
68 68 EXPORT_SYMBOL(_spin_lock_irqsave);
69 69 #endif
70 70  
71   -#ifndef _spin_lock_irq
  71 +#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
72 72 void __lockfunc _spin_lock_irq(spinlock_t *lock)
73 73 {
74 74 __spin_lock_irq(lock);
... ... @@ -76,7 +76,7 @@
76 76 EXPORT_SYMBOL(_spin_lock_irq);
77 77 #endif
78 78  
79   -#ifndef _spin_lock_bh
  79 +#ifndef CONFIG_INLINE_SPIN_LOCK_BH
80 80 void __lockfunc _spin_lock_bh(spinlock_t *lock)
81 81 {
82 82 __spin_lock_bh(lock);
... ... @@ -84,7 +84,7 @@
84 84 EXPORT_SYMBOL(_spin_lock_bh);
85 85 #endif
86 86  
87   -#ifndef _read_lock_irqsave
  87 +#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
88 88 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
89 89 {
90 90 return __read_lock_irqsave(lock);
... ... @@ -92,7 +92,7 @@
92 92 EXPORT_SYMBOL(_read_lock_irqsave);
93 93 #endif
94 94  
95   -#ifndef _read_lock_irq
  95 +#ifndef CONFIG_INLINE_READ_LOCK_IRQ
96 96 void __lockfunc _read_lock_irq(rwlock_t *lock)
97 97 {
98 98 __read_lock_irq(lock);
... ... @@ -100,7 +100,7 @@
100 100 EXPORT_SYMBOL(_read_lock_irq);
101 101 #endif
102 102  
103   -#ifndef _read_lock_bh
  103 +#ifndef CONFIG_INLINE_READ_LOCK_BH
104 104 void __lockfunc _read_lock_bh(rwlock_t *lock)
105 105 {
106 106 __read_lock_bh(lock);
... ... @@ -108,7 +108,7 @@
108 108 EXPORT_SYMBOL(_read_lock_bh);
109 109 #endif
110 110  
111   -#ifndef _write_lock_irqsave
  111 +#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
112 112 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
113 113 {
114 114 return __write_lock_irqsave(lock);
... ... @@ -116,7 +116,7 @@
116 116 EXPORT_SYMBOL(_write_lock_irqsave);
117 117 #endif
118 118  
119   -#ifndef _write_lock_irq
  119 +#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
120 120 void __lockfunc _write_lock_irq(rwlock_t *lock)
121 121 {
122 122 __write_lock_irq(lock);
... ... @@ -124,7 +124,7 @@
124 124 EXPORT_SYMBOL(_write_lock_irq);
125 125 #endif
126 126  
127   -#ifndef _write_lock_bh
  127 +#ifndef CONFIG_INLINE_WRITE_LOCK_BH
128 128 void __lockfunc _write_lock_bh(rwlock_t *lock)
129 129 {
130 130 __write_lock_bh(lock);
... ... @@ -132,7 +132,7 @@
132 132 EXPORT_SYMBOL(_write_lock_bh);
133 133 #endif
134 134  
135   -#ifndef _spin_lock
  135 +#ifndef CONFIG_INLINE_SPIN_LOCK
136 136 void __lockfunc _spin_lock(spinlock_t *lock)
137 137 {
138 138 __spin_lock(lock);
... ... @@ -140,7 +140,7 @@
140 140 EXPORT_SYMBOL(_spin_lock);
141 141 #endif
142 142  
143   -#ifndef _write_lock
  143 +#ifndef CONFIG_INLINE_WRITE_LOCK
144 144 void __lockfunc _write_lock(rwlock_t *lock)
145 145 {
146 146 __write_lock(lock);
... ... @@ -272,7 +272,7 @@
272 272  
273 273 #endif
274 274  
275   -#ifndef _spin_unlock
  275 +#ifndef CONFIG_INLINE_SPIN_UNLOCK
276 276 void __lockfunc _spin_unlock(spinlock_t *lock)
277 277 {
278 278 __spin_unlock(lock);
... ... @@ -280,7 +280,7 @@
280 280 EXPORT_SYMBOL(_spin_unlock);
281 281 #endif
282 282  
283   -#ifndef _write_unlock
  283 +#ifndef CONFIG_INLINE_WRITE_UNLOCK
284 284 void __lockfunc _write_unlock(rwlock_t *lock)
285 285 {
286 286 __write_unlock(lock);
... ... @@ -288,7 +288,7 @@
288 288 EXPORT_SYMBOL(_write_unlock);
289 289 #endif
290 290  
291   -#ifndef _read_unlock
  291 +#ifndef CONFIG_INLINE_READ_UNLOCK
292 292 void __lockfunc _read_unlock(rwlock_t *lock)
293 293 {
294 294 __read_unlock(lock);
... ... @@ -296,7 +296,7 @@
296 296 EXPORT_SYMBOL(_read_unlock);
297 297 #endif
298 298  
299   -#ifndef _spin_unlock_irqrestore
  299 +#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
300 300 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
301 301 {
302 302 __spin_unlock_irqrestore(lock, flags);
... ... @@ -304,7 +304,7 @@
304 304 EXPORT_SYMBOL(_spin_unlock_irqrestore);
305 305 #endif
306 306  
307   -#ifndef _spin_unlock_irq
  307 +#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
308 308 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
309 309 {
310 310 __spin_unlock_irq(lock);
... ... @@ -312,7 +312,7 @@
312 312 EXPORT_SYMBOL(_spin_unlock_irq);
313 313 #endif
314 314  
315   -#ifndef _spin_unlock_bh
  315 +#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
316 316 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
317 317 {
318 318 __spin_unlock_bh(lock);
... ... @@ -320,7 +320,7 @@
320 320 EXPORT_SYMBOL(_spin_unlock_bh);
321 321 #endif
322 322  
323   -#ifndef _read_unlock_irqrestore
  323 +#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
324 324 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
325 325 {
326 326 __read_unlock_irqrestore(lock, flags);
... ... @@ -328,7 +328,7 @@
328 328 EXPORT_SYMBOL(_read_unlock_irqrestore);
329 329 #endif
330 330  
331   -#ifndef _read_unlock_irq
  331 +#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
332 332 void __lockfunc _read_unlock_irq(rwlock_t *lock)
333 333 {
334 334 __read_unlock_irq(lock);
... ... @@ -336,7 +336,7 @@
336 336 EXPORT_SYMBOL(_read_unlock_irq);
337 337 #endif
338 338  
339   -#ifndef _read_unlock_bh
  339 +#ifndef CONFIG_INLINE_READ_UNLOCK_BH
340 340 void __lockfunc _read_unlock_bh(rwlock_t *lock)
341 341 {
342 342 __read_unlock_bh(lock);
... ... @@ -344,7 +344,7 @@
344 344 EXPORT_SYMBOL(_read_unlock_bh);
345 345 #endif
346 346  
347   -#ifndef _write_unlock_irqrestore
  347 +#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
348 348 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
349 349 {
350 350 __write_unlock_irqrestore(lock, flags);
... ... @@ -352,7 +352,7 @@
352 352 EXPORT_SYMBOL(_write_unlock_irqrestore);
353 353 #endif
354 354  
355   -#ifndef _write_unlock_irq
  355 +#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
356 356 void __lockfunc _write_unlock_irq(rwlock_t *lock)
357 357 {
358 358 __write_unlock_irq(lock);
... ... @@ -360,7 +360,7 @@
360 360 EXPORT_SYMBOL(_write_unlock_irq);
361 361 #endif
362 362  
363   -#ifndef _write_unlock_bh
  363 +#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
364 364 void __lockfunc _write_unlock_bh(rwlock_t *lock)
365 365 {
366 366 __write_unlock_bh(lock);
... ... @@ -368,7 +368,7 @@
368 368 EXPORT_SYMBOL(_write_unlock_bh);
369 369 #endif
370 370  
371   -#ifndef _spin_trylock_bh
  371 +#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
372 372 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
373 373 {
374 374 return __spin_trylock_bh(lock);