lib/kernel_lock.c

/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel().  It is transparently dropped and reacquired
 * over schedule().  It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
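
/*
 * Recursion bookkeeping: the per-task counter current->lock_depth
 * tracks BKL ownership, with -1 meaning "not held".  Only the
 * -1 -> 0 transition in lock_kernel() below actually takes
 * kernel_flag, and only the 0 -> -1 transition in unlock_kernel()
 * releases it; nested acquisitions just move the counter.
 */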
  
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (test_thread_flag(TIF_NEED_RESCHED))
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
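
/*
 * Illustrative sketch of the caller's side of this contract; the real
 * call sites live in the scheduler core, and this hypothetical helper
 * only shows the intended sequence around a context switch (preemption
 * is assumed to be disabled throughout, as required above).
 */
static void __maybe_unused __bkl_schedule_contract_sketch(void)
{
	__release_kernel_lock();	/* drop the BKL before switching away */

	/* ... another task runs; eventually we are switched back in ... */

	if (__reacquire_kernel_lock() < 0) {
		/*
		 * -EAGAIN: TIF_NEED_RESCHED was set while we spun, so the
		 * real caller loops back and reschedules before retrying
		 * the reacquire.
		 */
	}
}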
  
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (i.e. UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled.
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif
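
/*
 * Design note on the preemptible slow path above: the inner
 * spin_is_locked() loop only reads the lock word, so a waiter does not
 * bounce the cacheline with atomic operations while someone else holds
 * the lock; and because the wait runs with preemption re-enabled, the
 * waiting task can still be scheduled away instead of inflating
 * scheduling latency.
 */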

static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
	int depth = current->lock_depth + 1;
	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();
}
EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
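
/*
 * Usage sketch (hypothetical example, not a real call site): legacy
 * code brackets its critical sections with lock_kernel() and
 * unlock_kernel(), and may do so recursively.
 */
static void __maybe_unused bkl_usage_sketch(void)
{
	lock_kernel();		/* outermost call: takes kernel_flag */
	/* ... touch legacy state protected by the BKL ... */
	lock_kernel();		/* nested call: only bumps lock_depth */
	unlock_kernel();	/* inner release: the BKL is still held */
	unlock_kernel();	/* outermost release: drops kernel_flag */
}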