kernel/wait.c

  /*
   * Generic waiting primitives.
   *
   * (C) 2004 William Irwin, Oracle
   */
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/sched.h>
  #include <linux/mm.h>
  #include <linux/wait.h>
  #include <linux/hash.h>
  void init_waitqueue_head(wait_queue_head_t *q)
  {
  	spin_lock_init(&q->lock);
  	INIT_LIST_HEAD(&q->task_list);
  }

  EXPORT_SYMBOL(init_waitqueue_head);
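
  /*
   * Example (illustrative sketch, not part of this file): a statically
   * allocated head can be set up with DECLARE_WAIT_QUEUE_HEAD(); a head
   * embedded in a dynamically allocated object must be initialized at
   * run time.  'my_dev' is a hypothetical structure.
   *
   *	static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
   *
   *	struct my_dev {
   *		wait_queue_head_t wq;
   *	};
   *	...
   *	init_waitqueue_head(&dev->wq);
   */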

  void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
  {
  	unsigned long flags;
  
  	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
  	spin_lock_irqsave(&q->lock, flags);
  	__add_wait_queue(q, wait);
  	spin_unlock_irqrestore(&q->lock, flags);
  }
  EXPORT_SYMBOL(add_wait_queue);
  
  void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  {
  	unsigned long flags;
  
  	wait->flags |= WQ_FLAG_EXCLUSIVE;
  	spin_lock_irqsave(&q->lock, flags);
  	__add_wait_queue_tail(q, wait);
  	spin_unlock_irqrestore(&q->lock, flags);
  }
  EXPORT_SYMBOL(add_wait_queue_exclusive);
  
  void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&q->lock, flags);
  	__remove_wait_queue(q, wait);
  	spin_unlock_irqrestore(&q->lock, flags);
  }
  EXPORT_SYMBOL(remove_wait_queue);
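
  /*
   * Example (illustrative sketch, not part of this file): the classic
   * open-coded sleep built on the helpers above.  'my_wq' and
   * 'condition' are hypothetical names.
   *
   *	DECLARE_WAITQUEUE(wait, current);
   *
   *	add_wait_queue(&my_wq, &wait);
   *	for (;;) {
   *		set_current_state(TASK_UNINTERRUPTIBLE);
   *		if (condition)
   *			break;
   *		schedule();
   *	}
   *	set_current_state(TASK_RUNNING);
   *	remove_wait_queue(&my_wq, &wait);
   */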
  
  
  /*
   * Note: we use "set_current_state()" _after_ the wait-queue add,
   * because we need a memory barrier there on SMP, so that any
   * wake-function that tests for the wait-queue being active
   * will be guaranteed to see waitqueue addition _or_ subsequent
   * tests in this thread will see the wakeup having taken place.
   *
   * The spin_unlock() itself is semi-permeable and only protects
   * one way (it only protects stuff inside the critical region and
   * stops them from bleeding out - it would still allow subsequent
   * loads to move into the critical region).
   */
  void fastcall
  prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
  {
  	unsigned long flags;
  
  	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
  	spin_lock_irqsave(&q->lock, flags);
  	if (list_empty(&wait->task_list))
  		__add_wait_queue(q, wait);
  	/*
  	 * don't alter the task state if this is just going to
  	 * queue an async wait queue callback
  	 */
  	if (is_sync_wait(wait))
  		set_current_state(state);
  	spin_unlock_irqrestore(&q->lock, flags);
  }
  EXPORT_SYMBOL(prepare_to_wait);
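
  /*
   * Example (illustrative sketch, not part of this file): the canonical
   * wait loop that prepare_to_wait()/finish_wait() support.  'my_wq' and
   * 'condition' are hypothetical names.
   *
   *	DEFINE_WAIT(wait);
   *
   *	for (;;) {
   *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
   *		if (condition)
   *			break;
   *		schedule();
   *	}
   *	finish_wait(&my_wq, &wait);
   */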
  
  void fastcall
  prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
  {
  	unsigned long flags;
  
  	wait->flags |= WQ_FLAG_EXCLUSIVE;
  	spin_lock_irqsave(&q->lock, flags);
  	if (list_empty(&wait->task_list))
  		__add_wait_queue_tail(q, wait);
  	/*
  	 * don't alter the task state if this is just going to
  	 * queue an async wait queue callback
  	 */
  	if (is_sync_wait(wait))
  		set_current_state(state);
  	spin_unlock_irqrestore(&q->lock, flags);
  }
  EXPORT_SYMBOL(prepare_to_wait_exclusive);
  
  void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
  {
  	unsigned long flags;
  
  	__set_current_state(TASK_RUNNING);
  	/*
  	 * We can check for list emptiness outside the lock
  	 * IFF:
  	 *  - we use the "careful" check that verifies both
  	 *    the next and prev pointers, so that there cannot
  	 *    be any half-pending updates in progress on other
  	 *    CPUs that we haven't seen yet (and that might
  	 *    still change the stack area).
  	 * and
  	 *  - all other users take the lock (i.e. we can only
  	 *    have _one_ other CPU that looks at or modifies
  	 *    the list).
  	 */
  	if (!list_empty_careful(&wait->task_list)) {
  		spin_lock_irqsave(&q->lock, flags);
  		list_del_init(&wait->task_list);
  		spin_unlock_irqrestore(&q->lock, flags);
  	}
  }
  EXPORT_SYMBOL(finish_wait);
  
  /*
   * Wake function installed by DEFINE_WAIT(): wake the task and, if the
   * wakeup succeeded, remove the entry from the queue so that a later
   * finish_wait() finds an empty list and can skip taking the lock.
   */
  int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
  {
  	int ret = default_wake_function(wait, mode, sync, key);
  
  	if (ret)
  		list_del_init(&wait->task_list);
  	return ret;
  }
  EXPORT_SYMBOL(autoremove_wake_function);
  
  /*
   * Wake function for bit waiters: wake the waiter only if it is queued
   * on the same word and bit, and only once that bit has been cleared.
   */
  int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
  {
  	struct wait_bit_key *key = arg;
  	struct wait_bit_queue *wait_bit
  		= container_of(wait, struct wait_bit_queue, wait);
  
  	if (wait_bit->key.flags != key->flags ||
  			wait_bit->key.bit_nr != key->bit_nr ||
  			test_bit(key->bit_nr, key->flags))
  		return 0;
  	else
  		return autoremove_wake_function(wait, mode, sync, key);
  }
  EXPORT_SYMBOL(wake_bit_function);
  
  /*
   * To allow interruptible waiting and asynchronous (i.e. nonblocking)
   * waiting, the action routines passed to __wait_on_bit() and
   * __wait_on_bit_lock() may return nonzero codes.  A nonzero return
   * halts the wait and is propagated back to the caller.
   */
  int __sched fastcall
  __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
  			int (*action)(void *), unsigned mode)
  {
  	int ret = 0;
  
  	do {
  		prepare_to_wait(wq, &q->wait, mode);
  		if (test_bit(q->key.bit_nr, q->key.flags))
  			ret = (*action)(q->key.flags);
  	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
  	finish_wait(wq, &q->wait);
  	return ret;
  }
  EXPORT_SYMBOL(__wait_on_bit);
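
  /*
   * Example (illustrative sketch, not part of this file): a minimal
   * 'action' routine for the bit-wait calls above.  It simply schedules;
   * returning nonzero instead would abort the wait.  'my_bit_wait' is a
   * hypothetical name.
   *
   *	static int my_bit_wait(void *word)
   *	{
   *		schedule();
   *		return 0;
   *	}
   */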
  
  int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
  					int (*action)(void *), unsigned mode)
  {
  	wait_queue_head_t *wq = bit_waitqueue(word, bit);
  	DEFINE_WAIT_BIT(wait, word, bit);
  
  	return __wait_on_bit(wq, &wait, action, mode);
  }
  EXPORT_SYMBOL(out_of_line_wait_on_bit);
  
  int __sched fastcall
  __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
  			int (*action)(void *), unsigned mode)
  {
  	int ret = 0;
  
  	do {
  		prepare_to_wait_exclusive(wq, &q->wait, mode);
  		if (test_bit(q->key.bit_nr, q->key.flags)) {
  			if ((ret = (*action)(q->key.flags)))
  				break;
  		}
  	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
  	finish_wait(wq, &q->wait);
  	return ret;
  }
  EXPORT_SYMBOL(__wait_on_bit_lock);
  
  int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
  					int (*action)(void *), unsigned mode)
  {
  	wait_queue_head_t *wq = bit_waitqueue(word, bit);
  	DEFINE_WAIT_BIT(wait, word, bit);
  
  	return __wait_on_bit_lock(wq, &wait, action, mode);
  }
  EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
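
  /*
   * Example (illustrative sketch, not part of this file): using a bit as
   * a lock via the wait_on_bit_lock() wrapper from <linux/wait.h>.
   * 'my_flags', MY_LOCK_BIT and my_bit_wait are hypothetical names.
   *
   *	if (!wait_on_bit_lock(&my_flags, MY_LOCK_BIT, my_bit_wait,
   *			      TASK_UNINTERRUPTIBLE)) {
   *		... critical section ...
   *		clear_bit(MY_LOCK_BIT, &my_flags);
   *		smp_mb__after_clear_bit();
   *		wake_up_bit(&my_flags, MY_LOCK_BIT);
   *	}
   */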
  
  void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
  {
  	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
  	if (waitqueue_active(wq))
  		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
  }
  EXPORT_SYMBOL(__wake_up_bit);
  
  /**
   * wake_up_bit - wake up a waiter on a bit
   * @word: the word being waited on, a kernel virtual address
   * @bit: the bit of the word being waited on
   *
   * There is a standard hashed waitqueue table for generic use. This
   * is the part of the hashtable's accessor API that wakes up waiters
   * on a bit. For instance, if one were to have waiters on a bitflag,
   * one would call wake_up_bit() after clearing the bit.
   *
   * Because this uses waitqueue_active() internally, a memory barrier
   * must be issued before calling it.  Typically this will be
   * smp_mb__after_clear_bit(), but in cases where bitflags are
   * manipulated non-atomically under a lock, an explicit smp_mb() (as in
   * fs/inode.c) may be needed, because spin_unlock() does not guarantee
   * a full memory barrier.
   */
  void fastcall wake_up_bit(void *word, int bit)
  {
  	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
  }
  EXPORT_SYMBOL(wake_up_bit);
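
  /*
   * Example (illustrative sketch, not part of this file): the usual
   * clear-then-wake sequence described above.  'my_flags' and MY_BIT are
   * hypothetical names.
   *
   *	clear_bit(MY_BIT, &my_flags);
   *	smp_mb__after_clear_bit();
   *	wake_up_bit(&my_flags, MY_BIT);
   */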
  
  /*
   * Map a word/bit pair to its shared wait queue head: shift the word's
   * address left to make room for the bit index, then hash the combined
   * value into the wait table of the zone that contains the word.
   */
  fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
  {
  	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
  	const struct zone *zone = page_zone(virt_to_page(word));
  	unsigned long val = (unsigned long)word << shift | bit;
  
  	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
  }
  EXPORT_SYMBOL(bit_waitqueue);