lib/kernel_lock.c
/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);

/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - do_raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        while (!do_raw_spin_trylock(&kernel_flag)) {
                if (need_resched())
                        return -EAGAIN;
                cpu_relax();
        }
        preempt_disable();
        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        do_raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
}
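
/*
 * For context (an illustrative sketch, not part of this file): the
 * scheduler does not call the two helpers above directly. In this
 * era, <linux/smp_lock.h> wraps them behind a lock_depth check, so a
 * task that never takes the BKL pays almost nothing over schedule();
 * the wrappers look roughly like this:
 */
#define release_kernel_lock(tsk) do {                   \
        if (unlikely((tsk)->lock_depth >= 0))           \
                __release_kernel_lock();                \
} while (0)

static inline int reacquire_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                return __reacquire_kernel_lock();
        return 0;
}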

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * do_raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
        preempt_disable();
        if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
                /*
                 * If preemption was disabled even before this
                 * was called, there's nothing we can be polite
                 * about - just spin.
                 */
                if (preempt_count() > 1) {
                        do_raw_spin_lock(&kernel_flag);
                        return;
                }

                /*
                 * Otherwise, let's wait for the kernel lock
                 * with preemption enabled..
                 */
                do {
                        preempt_enable();
                        while (raw_spin_is_locked(&kernel_flag))
                                cpu_relax();
                        preempt_disable();
                } while (!do_raw_spin_trylock(&kernel_flag));
        }
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
        do_raw_spin_lock(&kernel_flag);
}
#endif
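
/*
 * Note that the CONFIG_PREEMPT loop above is a "test and
 * test-and-set" wait: it spins read-only on raw_spin_is_locked()
 * with preemption enabled, so the lock holder's cacheline is not
 * bounced around and the waiter can still be preempted; preemption
 * is re-disabled only around the actual trylock attempt.
 */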

static inline void __unlock_kernel(void)
{
        /*
         * the BKL is not covered by lockdep, so we open-code the
         * unlocking sequence (and thus avoid the dep-chain ops):
         */
        do_raw_spin_unlock(&kernel_flag);
        preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPU's.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
        int depth = current->lock_depth + 1;

        trace_lock_kernel(func, file, line);

        if (likely(!depth)) {
                might_sleep();
                __lock_kernel();
        }
        current->lock_depth = depth;
}

void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
        BUG_ON(current->lock_depth < 0);
        if (likely(--current->lock_depth < 0))
                __unlock_kernel();

        trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
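
/*
 * Usage sketch (illustrative, not part of this file): callers reach
 * _lock_kernel()/_unlock_kernel() through the lock_kernel() and
 * unlock_kernel() wrappers in <linux/smp_lock.h>, which pass
 * __func__/__FILE__/__LINE__ through for the tracepoints. Because
 * current->lock_depth counts recursion (-1 when not held), a function
 * can take the BKL without knowing whether its caller already holds
 * it. The function below is hypothetical:
 */
static void legacy_ioctl_example(void)          /* hypothetical */
{
        lock_kernel();          /* lock_depth -1 -> 0: actually takes kernel_flag */
        lock_kernel();          /* lock_depth  0 -> 1: pure recursion, no spinning */
        unlock_kernel();        /* lock_depth  1 -> 0: still held */
        unlock_kernel();        /* lock_depth  0 -> -1: kernel_flag released */
}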