Blame view

lib/kernel_lock.c 3.2 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
  /*
   * lib/kernel_lock.c
   *
   * This is the traditional BKL - big kernel lock. Largely
5895df967   Simon Arlott   spelling fixes: lib/
5
   * relegated to obsolescence, but used by various less
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
6
7
   * important (or lazy) subsystems.
   */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
8
9
  #include <linux/module.h>
  #include <linux/kallsyms.h>
6188e10d3   Matthew Wilcox   Convert asm/semap...
10
  #include <linux/semaphore.h>
96a2c464d   Frederic Weisbecker   tracing/bkl: Add ...
11
  #include <linux/smp_lock.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
12

925936ebf   Frederic Weisbecker   tracing: Pushdown...
13
14
  #define CREATE_TRACE_POINTS
  #include <trace/events/bkl.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
15
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel().  It is transparently dropped and reacquired
 * over schedule().  It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
/* Cacheline-aligned on SMP so the hot lock word does not share a line. */
static  __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
8e3e076c5   Linus Torvalds   BKL: revert back ...
26

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
27
28
  
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - do_raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!do_raw_spin_trylock(&kernel_flag)) {
		/*
		 * Don't spin against the lock while a reschedule is
		 * pending: report -EAGAIN so the scheduler can run
		 * something else and retry the reacquire later.
		 */
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	/* Lock acquired: bump the preempt count like any spinlock does. */
	preempt_disable();
	return 0;
}
  
/*
 * Drop the underlying spinlock on behalf of the scheduler.
 * Counterpart of __reacquire_kernel_lock() above.
 */
void __lockfunc __release_kernel_lock(void)
{
	do_raw_spin_unlock(&kernel_flag);
	/*
	 * No reschedule check here: this runs inside the scheduler,
	 * which handles the pending reschedule itself.
	 */
	preempt_enable_no_resched();
}
  
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * do_raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			do_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			/*
			 * Spin-wait with preemption enabled so other
			 * tasks can run, then disable it again before
			 * the trylock so a successful acquisition
			 * leaves the preempt count raised.
			 */
			preempt_enable();
			while (raw_spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!do_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	do_raw_spin_lock(&kernel_flag);
}
#endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
101

8e3e076c5   Linus Torvalds   BKL: revert back ...
102
/*
 * Release the BKL spinlock and re-enable preemption.
 */
static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
111

8e3e076c5   Linus Torvalds   BKL: revert back ...
112
113
114
115
116
117
  /*
   * Getting the big kernel lock.
   *
   * This cannot happen asynchronously, so we only need to
   * worry about other CPU's.
   */
925936ebf   Frederic Weisbecker   tracing: Pushdown...
118
  void __lockfunc _lock_kernel(const char *func, const char *file, int line)
8e3e076c5   Linus Torvalds   BKL: revert back ...
119
  {
925936ebf   Frederic Weisbecker   tracing: Pushdown...
120
121
122
  	int depth = current->lock_depth + 1;
  
  	trace_lock_kernel(func, file, line);
f01eb3640   Linus Torvalds   [BKL] add 'might_...
123
124
  	if (likely(!depth)) {
  		might_sleep();
8e3e076c5   Linus Torvalds   BKL: revert back ...
125
  		__lock_kernel();
f01eb3640   Linus Torvalds   [BKL] add 'might_...
126
  	}
8e3e076c5   Linus Torvalds   BKL: revert back ...
127
128
  	current->lock_depth = depth;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
129

925936ebf   Frederic Weisbecker   tracing: Pushdown...
130
  void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
8e3e076c5   Linus Torvalds   BKL: revert back ...
131
132
133
134
  {
  	BUG_ON(current->lock_depth < 0);
  	if (likely(--current->lock_depth < 0))
  		__unlock_kernel();
925936ebf   Frederic Weisbecker   tracing: Pushdown...
135
136
  
  	trace_unlock_kernel(func, file, line);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
137
  }
96a2c464d   Frederic Weisbecker   tracing/bkl: Add ...
138
139
  EXPORT_SYMBOL(_lock_kernel);
  EXPORT_SYMBOL(_unlock_kernel);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
140