Commit 9828ea9d75c38fe3dce05d00566eed61c85732e6
1 parent 5f6384c5fb
locking: Further name space cleanups
The name space hierarchy for the internal lock functions is now a bit backwards. raw_spin* functions map to _spin* which use __spin*, while we would like to have _raw_spin* and __raw_spin*.

_raw_spin* is already used by lock debugging, so rename those functions to do_raw_spin* to free up the _raw_spin* name space.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
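Reader's note: the changelog describes the chain raw_spin* -> _spin* -> __spin* -> (_raw_spin*, now do_raw_spin*) -> arch_spin*. A simplified sketch of the lock path after this patch, reconstructed from the headers in the diff below (the middle levels vary with SMP and CONFIG_PREEMPT, so treat this as an approximation, not the exact chain for every config):

	raw_spin_lock(lock);                      /* public API, include/linux/spinlock.h */
	  _spin_lock(lock);                       /* out-of-line API function, kernel/spinlock.c */
	    __spin_lock(lock);                    /* inline body, spinlock_api_smp.h */
	      do_raw_spin_lock(lock);             /* renamed from _raw_spin_lock(); debug or plain wrapper */
	        arch_spin_lock(&lock->raw_lock);  /* architecture implementation */

Renaming the wrapper level frees the _raw_spin* (and, by extension, __raw_spin*) names so the upper layers can later adopt them.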
Showing 8 changed files with 81 additions and 81 deletions
include/linux/rwlock.h
... | ... | @@ -29,25 +29,25 @@ |
29 | 29 | #endif |
30 | 30 | |
31 | 31 | #ifdef CONFIG_DEBUG_SPINLOCK |
32 | - extern void _raw_read_lock(rwlock_t *lock); | |
33 | -#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) | |
34 | - extern int _raw_read_trylock(rwlock_t *lock); | |
35 | - extern void _raw_read_unlock(rwlock_t *lock); | |
36 | - extern void _raw_write_lock(rwlock_t *lock); | |
37 | -#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) | |
38 | - extern int _raw_write_trylock(rwlock_t *lock); | |
39 | - extern void _raw_write_unlock(rwlock_t *lock); | |
32 | + extern void do_raw_read_lock(rwlock_t *lock); | |
33 | +#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) | |
34 | + extern int do_raw_read_trylock(rwlock_t *lock); | |
35 | + extern void do_raw_read_unlock(rwlock_t *lock); | |
36 | + extern void do_raw_write_lock(rwlock_t *lock); | |
37 | +#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) | |
38 | + extern int do_raw_write_trylock(rwlock_t *lock); | |
39 | + extern void do_raw_write_unlock(rwlock_t *lock); | |
40 | 40 | #else |
41 | -# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) | |
42 | -# define _raw_read_lock_flags(lock, flags) \ | |
41 | +# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) | |
42 | +# define do_raw_read_lock_flags(lock, flags) \ | |
43 | 43 | arch_read_lock_flags(&(lock)->raw_lock, *(flags)) |
44 | -# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) | |
45 | -# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) | |
46 | -# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) | |
47 | -# define _raw_write_lock_flags(lock, flags) \ | |
44 | +# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) | |
45 | +# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) | |
46 | +# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) | |
47 | +# define do_raw_write_lock_flags(lock, flags) \ | |
48 | 48 | arch_write_lock_flags(&(lock)->raw_lock, *(flags)) |
49 | -# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) | |
50 | -# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) | |
49 | +# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) | |
50 | +# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) | |
51 | 51 | #endif |
52 | 52 | |
53 | 53 | #define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) |
include/linux/rwlock_api_smp.h
... | ... | @@ -113,7 +113,7 @@ |
113 | 113 | static inline int __read_trylock(rwlock_t *lock) |
114 | 114 | { |
115 | 115 | preempt_disable(); |
116 | - if (_raw_read_trylock(lock)) { | |
116 | + if (do_raw_read_trylock(lock)) { | |
117 | 117 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); |
118 | 118 | return 1; |
119 | 119 | } |
... | ... | @@ -124,7 +124,7 @@ |
124 | 124 | static inline int __write_trylock(rwlock_t *lock) |
125 | 125 | { |
126 | 126 | preempt_disable(); |
127 | - if (_raw_write_trylock(lock)) { | |
127 | + if (do_raw_write_trylock(lock)) { | |
128 | 128 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
129 | 129 | return 1; |
130 | 130 | } |
... | ... | @@ -143,7 +143,7 @@ |
143 | 143 | { |
144 | 144 | preempt_disable(); |
145 | 145 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
146 | - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | |
146 | + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | |
147 | 147 | } |
148 | 148 | |
149 | 149 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) |
... | ... | @@ -153,8 +153,8 @@ |
153 | 153 | local_irq_save(flags); |
154 | 154 | preempt_disable(); |
155 | 155 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
156 | - LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | |
157 | - _raw_read_lock_flags, &flags); | |
156 | + LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, | |
157 | + do_raw_read_lock_flags, &flags); | |
158 | 158 | return flags; |
159 | 159 | } |
160 | 160 | |
... | ... | @@ -163,7 +163,7 @@ |
163 | 163 | local_irq_disable(); |
164 | 164 | preempt_disable(); |
165 | 165 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
166 | - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | |
166 | + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | |
167 | 167 | } |
168 | 168 | |
169 | 169 | static inline void __read_lock_bh(rwlock_t *lock) |
... | ... | @@ -171,7 +171,7 @@ |
171 | 171 | local_bh_disable(); |
172 | 172 | preempt_disable(); |
173 | 173 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
174 | - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | |
174 | + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | |
175 | 175 | } |
176 | 176 | |
177 | 177 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) |
... | ... | @@ -181,8 +181,8 @@ |
181 | 181 | local_irq_save(flags); |
182 | 182 | preempt_disable(); |
183 | 183 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
184 | - LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | |
185 | - _raw_write_lock_flags, &flags); | |
184 | + LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, | |
185 | + do_raw_write_lock_flags, &flags); | |
186 | 186 | return flags; |
187 | 187 | } |
188 | 188 | |
... | ... | @@ -191,7 +191,7 @@ |
191 | 191 | local_irq_disable(); |
192 | 192 | preempt_disable(); |
193 | 193 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
194 | - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | |
194 | + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | |
195 | 195 | } |
196 | 196 | |
197 | 197 | static inline void __write_lock_bh(rwlock_t *lock) |
... | ... | @@ -199,14 +199,14 @@ |
199 | 199 | local_bh_disable(); |
200 | 200 | preempt_disable(); |
201 | 201 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
202 | - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | |
202 | + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | |
203 | 203 | } |
204 | 204 | |
205 | 205 | static inline void __write_lock(rwlock_t *lock) |
206 | 206 | { |
207 | 207 | preempt_disable(); |
208 | 208 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
209 | - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | |
209 | + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | |
210 | 210 | } |
211 | 211 | |
212 | 212 | #endif /* CONFIG_PREEMPT */ |
... | ... | @@ -214,21 +214,21 @@ |
214 | 214 | static inline void __write_unlock(rwlock_t *lock) |
215 | 215 | { |
216 | 216 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
217 | - _raw_write_unlock(lock); | |
217 | + do_raw_write_unlock(lock); | |
218 | 218 | preempt_enable(); |
219 | 219 | } |
220 | 220 | |
221 | 221 | static inline void __read_unlock(rwlock_t *lock) |
222 | 222 | { |
223 | 223 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
224 | - _raw_read_unlock(lock); | |
224 | + do_raw_read_unlock(lock); | |
225 | 225 | preempt_enable(); |
226 | 226 | } |
227 | 227 | |
228 | 228 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
229 | 229 | { |
230 | 230 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
231 | - _raw_read_unlock(lock); | |
231 | + do_raw_read_unlock(lock); | |
232 | 232 | local_irq_restore(flags); |
233 | 233 | preempt_enable(); |
234 | 234 | } |
... | ... | @@ -236,7 +236,7 @@ |
236 | 236 | static inline void __read_unlock_irq(rwlock_t *lock) |
237 | 237 | { |
238 | 238 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
239 | - _raw_read_unlock(lock); | |
239 | + do_raw_read_unlock(lock); | |
240 | 240 | local_irq_enable(); |
241 | 241 | preempt_enable(); |
242 | 242 | } |
... | ... | @@ -244,7 +244,7 @@ |
244 | 244 | static inline void __read_unlock_bh(rwlock_t *lock) |
245 | 245 | { |
246 | 246 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
247 | - _raw_read_unlock(lock); | |
247 | + do_raw_read_unlock(lock); | |
248 | 248 | preempt_enable_no_resched(); |
249 | 249 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
250 | 250 | } |
... | ... | @@ -253,7 +253,7 @@ |
253 | 253 | unsigned long flags) |
254 | 254 | { |
255 | 255 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
256 | - _raw_write_unlock(lock); | |
256 | + do_raw_write_unlock(lock); | |
257 | 257 | local_irq_restore(flags); |
258 | 258 | preempt_enable(); |
259 | 259 | } |
... | ... | @@ -261,7 +261,7 @@ |
261 | 261 | static inline void __write_unlock_irq(rwlock_t *lock) |
262 | 262 | { |
263 | 263 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
264 | - _raw_write_unlock(lock); | |
264 | + do_raw_write_unlock(lock); | |
265 | 265 | local_irq_enable(); |
266 | 266 | preempt_enable(); |
267 | 267 | } |
... | ... | @@ -269,7 +269,7 @@ |
269 | 269 | static inline void __write_unlock_bh(rwlock_t *lock) |
270 | 270 | { |
271 | 271 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
272 | - _raw_write_unlock(lock); | |
272 | + do_raw_write_unlock(lock); | |
273 | 273 | preempt_enable_no_resched(); |
274 | 274 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
275 | 275 | } |
include/linux/spinlock.h
... | ... | @@ -128,28 +128,28 @@ |
128 | 128 | #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) |
129 | 129 | |
130 | 130 | #ifdef CONFIG_DEBUG_SPINLOCK |
131 | - extern void _raw_spin_lock(raw_spinlock_t *lock); | |
132 | -#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | |
133 | - extern int _raw_spin_trylock(raw_spinlock_t *lock); | |
134 | - extern void _raw_spin_unlock(raw_spinlock_t *lock); | |
131 | + extern void do_raw_spin_lock(raw_spinlock_t *lock); | |
132 | +#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) | |
133 | + extern int do_raw_spin_trylock(raw_spinlock_t *lock); | |
134 | + extern void do_raw_spin_unlock(raw_spinlock_t *lock); | |
135 | 135 | #else |
136 | -static inline void _raw_spin_lock(raw_spinlock_t *lock) | |
136 | +static inline void do_raw_spin_lock(raw_spinlock_t *lock) | |
137 | 137 | { |
138 | 138 | arch_spin_lock(&lock->raw_lock); |
139 | 139 | } |
140 | 140 | |
141 | 141 | static inline void |
142 | -_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) | |
142 | +do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) | |
143 | 143 | { |
144 | 144 | arch_spin_lock_flags(&lock->raw_lock, *flags); |
145 | 145 | } |
146 | 146 | |
147 | -static inline int _raw_spin_trylock(raw_spinlock_t *lock) | |
147 | +static inline int do_raw_spin_trylock(raw_spinlock_t *lock) | |
148 | 148 | { |
149 | 149 | return arch_spin_trylock(&(lock)->raw_lock); |
150 | 150 | } |
151 | 151 | |
152 | -static inline void _raw_spin_unlock(raw_spinlock_t *lock) | |
152 | +static inline void do_raw_spin_unlock(raw_spinlock_t *lock) | |
153 | 153 | { |
154 | 154 | arch_spin_unlock(&lock->raw_lock); |
155 | 155 | } |
include/linux/spinlock_api_smp.h
... | ... | @@ -85,7 +85,7 @@ |
85 | 85 | static inline int __spin_trylock(raw_spinlock_t *lock) |
86 | 86 | { |
87 | 87 | preempt_disable(); |
88 | - if (_raw_spin_trylock(lock)) { | |
88 | + if (do_raw_spin_trylock(lock)) { | |
89 | 89 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
90 | 90 | return 1; |
91 | 91 | } |
... | ... | @@ -109,13 +109,13 @@ |
109 | 109 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
110 | 110 | /* |
111 | 111 | * On lockdep we dont want the hand-coded irq-enable of |
112 | - * _raw_spin_lock_flags() code, because lockdep assumes | |
112 | + * do_raw_spin_lock_flags() code, because lockdep assumes | |
113 | 113 | * that interrupts are not re-enabled during lock-acquire: |
114 | 114 | */ |
115 | 115 | #ifdef CONFIG_LOCKDEP |
116 | - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | |
116 | + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
117 | 117 | #else |
118 | - _raw_spin_lock_flags(lock, &flags); | |
118 | + do_raw_spin_lock_flags(lock, &flags); | |
119 | 119 | #endif |
120 | 120 | return flags; |
121 | 121 | } |
... | ... | @@ -125,7 +125,7 @@ |
125 | 125 | local_irq_disable(); |
126 | 126 | preempt_disable(); |
127 | 127 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
128 | - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | |
128 | + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
129 | 129 | } |
130 | 130 | |
131 | 131 | static inline void __spin_lock_bh(raw_spinlock_t *lock) |
... | ... | @@ -133,14 +133,14 @@ |
133 | 133 | local_bh_disable(); |
134 | 134 | preempt_disable(); |
135 | 135 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
136 | - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | |
136 | + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
137 | 137 | } |
138 | 138 | |
139 | 139 | static inline void __spin_lock(raw_spinlock_t *lock) |
140 | 140 | { |
141 | 141 | preempt_disable(); |
142 | 142 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
143 | - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | |
143 | + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
144 | 144 | } |
145 | 145 | |
146 | 146 | #endif /* CONFIG_PREEMPT */ |
... | ... | @@ -148,7 +148,7 @@ |
148 | 148 | static inline void __spin_unlock(raw_spinlock_t *lock) |
149 | 149 | { |
150 | 150 | spin_release(&lock->dep_map, 1, _RET_IP_); |
151 | - _raw_spin_unlock(lock); | |
151 | + do_raw_spin_unlock(lock); | |
152 | 152 | preempt_enable(); |
153 | 153 | } |
154 | 154 | |
... | ... | @@ -156,7 +156,7 @@ |
156 | 156 | unsigned long flags) |
157 | 157 | { |
158 | 158 | spin_release(&lock->dep_map, 1, _RET_IP_); |
159 | - _raw_spin_unlock(lock); | |
159 | + do_raw_spin_unlock(lock); | |
160 | 160 | local_irq_restore(flags); |
161 | 161 | preempt_enable(); |
162 | 162 | } |
... | ... | @@ -164,7 +164,7 @@ |
164 | 164 | static inline void __spin_unlock_irq(raw_spinlock_t *lock) |
165 | 165 | { |
166 | 166 | spin_release(&lock->dep_map, 1, _RET_IP_); |
167 | - _raw_spin_unlock(lock); | |
167 | + do_raw_spin_unlock(lock); | |
168 | 168 | local_irq_enable(); |
169 | 169 | preempt_enable(); |
170 | 170 | } |
... | ... | @@ -172,7 +172,7 @@ |
172 | 172 | static inline void __spin_unlock_bh(raw_spinlock_t *lock) |
173 | 173 | { |
174 | 174 | spin_release(&lock->dep_map, 1, _RET_IP_); |
175 | - _raw_spin_unlock(lock); | |
175 | + do_raw_spin_unlock(lock); | |
176 | 176 | preempt_enable_no_resched(); |
177 | 177 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
178 | 178 | } |
... | ... | @@ -181,7 +181,7 @@ |
181 | 181 | { |
182 | 182 | local_bh_disable(); |
183 | 183 | preempt_disable(); |
184 | - if (_raw_spin_trylock(lock)) { | |
184 | + if (do_raw_spin_trylock(lock)) { | |
185 | 185 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
186 | 186 | return 1; |
187 | 187 | } |
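A note on the LOCK_CONTENDED() calls that dominate this patch: the macro takes the lock plus a trylock/lock pair, which is why both do_raw_*_trylock and do_raw_*_lock have to be threaded through every call site. Roughly its shape in include/linux/lockdep.h of this era (quoted from memory, so treat as a sketch):

	#ifdef CONFIG_LOCK_STAT
	#define LOCK_CONTENDED(_lock, try, lock)			\
	do {								\
		if (!try(_lock)) {					\
			lock_contended(&(_lock)->dep_map, _RET_IP_);	\
			lock(_lock);					\
		}							\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
	} while (0)
	#else
	#define LOCK_CONTENDED(_lock, try, lock) \
		lock(_lock)
	#endif

With lock statistics enabled it records a contention event when the trylock fails; without them it degenerates to a plain lock call. Either way the rename is purely mechanical at these sites.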
kernel/sched.c
kernel/spinlock.c
... | ... | @@ -48,7 +48,7 @@ |
48 | 48 | { \ |
49 | 49 | for (;;) { \ |
50 | 50 | preempt_disable(); \ |
51 | - if (likely(_raw_##op##_trylock(lock))) \ | |
51 | + if (likely(do_raw_##op##_trylock(lock))) \ | |
52 | 52 | break; \ |
53 | 53 | preempt_enable(); \ |
54 | 54 | \ |
... | ... | @@ -67,7 +67,7 @@ |
67 | 67 | for (;;) { \ |
68 | 68 | preempt_disable(); \ |
69 | 69 | local_irq_save(flags); \ |
70 | - if (likely(_raw_##op##_trylock(lock))) \ | |
70 | + if (likely(do_raw_##op##_trylock(lock))) \ | |
71 | 71 | break; \ |
72 | 72 | local_irq_restore(flags); \ |
73 | 73 | preempt_enable(); \ |
... | ... | @@ -345,7 +345,7 @@ |
345 | 345 | { |
346 | 346 | preempt_disable(); |
347 | 347 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
348 | - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | |
348 | + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
349 | 349 | } |
350 | 350 | EXPORT_SYMBOL(_spin_lock_nested); |
351 | 351 | |
... | ... | @@ -357,8 +357,8 @@ |
357 | 357 | local_irq_save(flags); |
358 | 358 | preempt_disable(); |
359 | 359 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
360 | - LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock, | |
361 | - _raw_spin_lock_flags, &flags); | |
360 | + LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, | |
361 | + do_raw_spin_lock_flags, &flags); | |
362 | 362 | return flags; |
363 | 363 | } |
364 | 364 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); |
... | ... | @@ -368,7 +368,7 @@ |
368 | 368 | { |
369 | 369 | preempt_disable(); |
370 | 370 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); |
371 | - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | |
371 | + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | |
372 | 372 | } |
373 | 373 | EXPORT_SYMBOL(_spin_lock_nest_lock); |
374 | 374 |
lib/kernel_lock.c
... | ... | @@ -36,12 +36,12 @@ |
36 | 36 | * If it successfully gets the lock, it should increment |
37 | 37 | * the preemption count like any spinlock does. |
38 | 38 | * |
39 | - * (This works on UP too - _raw_spin_trylock will never | |
39 | + * (This works on UP too - do_raw_spin_trylock will never | |
40 | 40 | * return false in that case) |
41 | 41 | */ |
42 | 42 | int __lockfunc __reacquire_kernel_lock(void) |
43 | 43 | { |
44 | - while (!_raw_spin_trylock(&kernel_flag)) { | |
44 | + while (!do_raw_spin_trylock(&kernel_flag)) { | |
45 | 45 | if (need_resched()) |
46 | 46 | return -EAGAIN; |
47 | 47 | cpu_relax(); |
... | ... | @@ -52,27 +52,27 @@ |
52 | 52 | |
53 | 53 | void __lockfunc __release_kernel_lock(void) |
54 | 54 | { |
55 | - _raw_spin_unlock(&kernel_flag); | |
55 | + do_raw_spin_unlock(&kernel_flag); | |
56 | 56 | preempt_enable_no_resched(); |
57 | 57 | } |
58 | 58 | |
59 | 59 | /* |
60 | 60 | * These are the BKL spinlocks - we try to be polite about preemption. |
61 | 61 | * If SMP is not on (ie UP preemption), this all goes away because the |
62 | - * _raw_spin_trylock() will always succeed. | |
62 | + * do_raw_spin_trylock() will always succeed. | |
63 | 63 | */ |
64 | 64 | #ifdef CONFIG_PREEMPT |
65 | 65 | static inline void __lock_kernel(void) |
66 | 66 | { |
67 | 67 | preempt_disable(); |
68 | - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { | |
68 | + if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { | |
69 | 69 | /* |
70 | 70 | * If preemption was disabled even before this |
71 | 71 | * was called, there's nothing we can be polite |
72 | 72 | * about - just spin. |
73 | 73 | */ |
74 | 74 | if (preempt_count() > 1) { |
75 | - _raw_spin_lock(&kernel_flag); | |
75 | + do_raw_spin_lock(&kernel_flag); | |
76 | 76 | return; |
77 | 77 | } |
78 | 78 | |
... | ... | @@ -85,7 +85,7 @@ |
85 | 85 | while (spin_is_locked(&kernel_flag)) |
86 | 86 | cpu_relax(); |
87 | 87 | preempt_disable(); |
88 | - } while (!_raw_spin_trylock(&kernel_flag)); | |
88 | + } while (!do_raw_spin_trylock(&kernel_flag)); | |
89 | 89 | } |
90 | 90 | } |
91 | 91 | |
... | ... | @@ -96,7 +96,7 @@ |
96 | 96 | */ |
97 | 97 | static inline void __lock_kernel(void) |
98 | 98 | { |
99 | - _raw_spin_lock(&kernel_flag); | |
99 | + do_raw_spin_lock(&kernel_flag); | |
100 | 100 | } |
101 | 101 | #endif |
102 | 102 | |
... | ... | @@ -106,7 +106,7 @@ |
106 | 106 | * the BKL is not covered by lockdep, so we open-code the |
107 | 107 | * unlocking sequence (and thus avoid the dep-chain ops): |
108 | 108 | */ |
109 | - _raw_spin_unlock(&kernel_flag); | |
109 | + do_raw_spin_unlock(&kernel_flag); | |
110 | 110 | preempt_enable(); |
111 | 111 | } |
112 | 112 |
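The UP remark in the comments above ("do_raw_spin_trylock will never return false") holds because on !CONFIG_SMP the arch_* operations are compile-time stubs. Roughly, from the non-debug branch of include/linux/spinlock_up.h in this era (quoted from memory, so treat as a sketch):

	/* non-debug UP stubs: locking reduces to a compiler barrier */
	# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
	# define arch_spin_unlock(lock)		do { barrier(); (void)(lock); } while (0)
	# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })  /* always succeeds */

so the polite-preemption spin loop above compiles away on UP kernels.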
lib/spinlock_debug.c
... | ... | @@ -125,7 +125,7 @@ |
125 | 125 | } |
126 | 126 | } |
127 | 127 | |
128 | -void _raw_spin_lock(raw_spinlock_t *lock) | |
128 | +void do_raw_spin_lock(raw_spinlock_t *lock) | |
129 | 129 | { |
130 | 130 | debug_spin_lock_before(lock); |
131 | 131 | if (unlikely(!arch_spin_trylock(&lock->raw_lock))) |
... | ... | @@ -133,7 +133,7 @@ |
133 | 133 | debug_spin_lock_after(lock); |
134 | 134 | } |
135 | 135 | |
136 | -int _raw_spin_trylock(raw_spinlock_t *lock) | |
136 | +int do_raw_spin_trylock(raw_spinlock_t *lock) | |
137 | 137 | { |
138 | 138 | int ret = arch_spin_trylock(&lock->raw_lock); |
139 | 139 | |
... | ... | @@ -148,7 +148,7 @@ |
148 | 148 | return ret; |
149 | 149 | } |
150 | 150 | |
151 | -void _raw_spin_unlock(raw_spinlock_t *lock) | |
151 | +void do_raw_spin_unlock(raw_spinlock_t *lock) | |
152 | 152 | { |
153 | 153 | debug_spin_unlock(lock); |
154 | 154 | arch_spin_unlock(&lock->raw_lock); |
... | ... | @@ -193,13 +193,13 @@ |
193 | 193 | } |
194 | 194 | #endif |
195 | 195 | |
196 | -void _raw_read_lock(rwlock_t *lock) | |
196 | +void do_raw_read_lock(rwlock_t *lock) | |
197 | 197 | { |
198 | 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
199 | 199 | arch_read_lock(&lock->raw_lock); |
200 | 200 | } |
201 | 201 | |
202 | -int _raw_read_trylock(rwlock_t *lock) | |
202 | +int do_raw_read_trylock(rwlock_t *lock) | |
203 | 203 | { |
204 | 204 | int ret = arch_read_trylock(&lock->raw_lock); |
205 | 205 | |
... | ... | @@ -212,7 +212,7 @@ |
212 | 212 | return ret; |
213 | 213 | } |
214 | 214 | |
215 | -void _raw_read_unlock(rwlock_t *lock) | |
215 | +void do_raw_read_unlock(rwlock_t *lock) | |
216 | 216 | { |
217 | 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
218 | 218 | arch_read_unlock(&lock->raw_lock); |
... | ... | @@ -268,14 +268,14 @@ |
268 | 268 | } |
269 | 269 | #endif |
270 | 270 | |
271 | -void _raw_write_lock(rwlock_t *lock) | |
271 | +void do_raw_write_lock(rwlock_t *lock) | |
272 | 272 | { |
273 | 273 | debug_write_lock_before(lock); |
274 | 274 | arch_write_lock(&lock->raw_lock); |
275 | 275 | debug_write_lock_after(lock); |
276 | 276 | } |
277 | 277 | |
278 | -int _raw_write_trylock(rwlock_t *lock) | |
278 | +int do_raw_write_trylock(rwlock_t *lock) | |
279 | 279 | { |
280 | 280 | int ret = arch_write_trylock(&lock->raw_lock); |
281 | 281 | |
... | ... | @@ -290,7 +290,7 @@ |
290 | 290 | return ret; |
291 | 291 | } |
292 | 292 | |
293 | -void _raw_write_unlock(rwlock_t *lock) | |
293 | +void do_raw_write_unlock(rwlock_t *lock) | |
294 | 294 | { |
295 | 295 | debug_write_unlock(lock); |
296 | 296 | arch_write_unlock(&lock->raw_lock); |