lib/sbitmap.c

  /*
   * Copyright (C) 2016 Facebook
   * Copyright (C) 2013-2014 Jens Axboe
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public
   * License v2 as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program.  If not, see <https://www.gnu.org/licenses/>.
   */
  #include <linux/sched.h>
  #include <linux/random.h>
  #include <linux/sbitmap.h>
  #include <linux/seq_file.h>
  
  int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
  		      gfp_t flags, int node)
  {
  	unsigned int bits_per_word;
  	unsigned int i;
  
  	if (shift < 0) {
  		shift = ilog2(BITS_PER_LONG);
  		/*
  		 * If the bitmap is small, shrink the number of bits per word so
  		 * we spread over a few cachelines, at least. If less than 4
  		 * bits, just forget about it, it's not going to work optimally
  		 * anyway.
  		 */
  		if (depth >= 4) {
  			while ((4U << shift) > depth)
  				shift--;
  		}
  	}
  	bits_per_word = 1U << shift;
  	if (bits_per_word > BITS_PER_LONG)
  		return -EINVAL;
  
  	sb->shift = shift;
  	sb->depth = depth;
  	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
  
  	if (depth == 0) {
  		sb->map = NULL;
  		return 0;
  	}
  	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
  	if (!sb->map)
  		return -ENOMEM;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		sb->map[i].depth = min(depth, bits_per_word);
  		depth -= sb->map[i].depth;
  	}
  	return 0;
  }
  EXPORT_SYMBOL_GPL(sbitmap_init_node);
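
/*
 * Worked example (illustrative, assuming a 64-bit machine): a call such as
 *
 *	sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
 *
 * starts at shift = ilog2(64) = 6, then shrinks it while (4U << shift) > 128,
 * stopping at shift = 5. That yields bits_per_word = 32 and
 * map_nr = DIV_ROUND_UP(128, 32) = 4, so the 128 bits are spread over four
 * words rather than packed into two full 64-bit words.
 */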
  
  void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
  {
  	unsigned int bits_per_word = 1U << sb->shift;
  	unsigned int i;
  
  	sb->depth = depth;
  	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
  
  	for (i = 0; i < sb->map_nr; i++) {
  		sb->map[i].depth = min(depth, bits_per_word);
  		depth -= sb->map[i].depth;
  	}
  }
  EXPORT_SYMBOL_GPL(sbitmap_resize);
  static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
  			      unsigned int hint, bool wrap)
  {
  	unsigned int orig_hint = hint;
  	int nr;
  
  	while (1) {
  		nr = find_next_zero_bit(word, depth, hint);
  		if (unlikely(nr >= depth)) {
  			/*
  			 * We started with an offset, and we didn't reset the
  			 * offset to 0 in a failure case, so start from 0 to
  			 * exhaust the map.
  			 */
  			if (orig_hint && hint && wrap) {
  				hint = orig_hint = 0;
  				continue;
  			}
  			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
  			break;
  
  		hint = nr + 1;
  		if (hint >= depth - 1)
  			hint = 0;
  	}
  
  	return nr;
  }
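
/*
 * Illustrative walk-through of the wrap logic above: with hint = 20,
 * depth = 32 and wrap = true, the loop first scans bits [20, 32); if none
 * are free, the "orig_hint && hint && wrap" test resets hint to 0 so that
 * bits [0, 20) are scanned before giving up. With wrap = false (round-robin
 * callers), the search never restarts within the word; the caller moves on
 * to the next word instead.
 */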
  
  int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
  {
  	unsigned int i, index;
  	int nr = -1;
  
  	index = SB_NR_TO_INDEX(sb, alloc_hint);
  
  	for (i = 0; i < sb->map_nr; i++) {
  		nr = __sbitmap_get_word(&sb->map[index].word,
  					sb->map[index].depth,
  					SB_NR_TO_BIT(sb, alloc_hint),
  					!round_robin);
  		if (nr != -1) {
  			nr += index << sb->shift;
  			break;
  		}
  
  		/* Jump to next index. */
  		index++;
  		alloc_hint = index << sb->shift;
  
  		if (index >= sb->map_nr) {
  			index = 0;
  			alloc_hint = 0;
  		}
  	}
  
  	return nr;
  }
  EXPORT_SYMBOL_GPL(sbitmap_get);
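
/*
 * Hypothetical usage sketch (names and context are illustrative, not taken
 * from this file):
 *
 *	int tag = sbitmap_get(&sb, hint, false);
 *	if (tag < 0)
 *		return -EBUSY;
 *	... use tag as an index into a preallocated array ...
 *	sbitmap_clear_bit(&sb, tag);
 *
 * A negative return means every bit was set; sbitmap_clear_bit() (from
 * <linux/sbitmap.h>) releases the bit again.
 */
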
  int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
  			unsigned long shallow_depth)
  {
  	unsigned int i, index;
  	int nr = -1;
  
  	index = SB_NR_TO_INDEX(sb, alloc_hint);
  
  	for (i = 0; i < sb->map_nr; i++) {
  		nr = __sbitmap_get_word(&sb->map[index].word,
  					min(sb->map[index].depth, shallow_depth),
  					SB_NR_TO_BIT(sb, alloc_hint), true);
  		if (nr != -1) {
  			nr += index << sb->shift;
  			break;
  		}
  
  		/* Jump to next index. */
  		index++;
  		alloc_hint = index << sb->shift;
  
  		if (index >= sb->map_nr) {
  			index = 0;
  			alloc_hint = 0;
  		}
  	}
  
  	return nr;
  }
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
  {
  	unsigned int i;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		if (sb->map[i].word)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
  
  bool sbitmap_any_bit_clear(const struct sbitmap *sb)
  {
  	unsigned int i;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		const struct sbitmap_word *word = &sb->map[i];
  		unsigned long ret;
  
  		ret = find_first_zero_bit(&word->word, word->depth);
  		if (ret < word->depth)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
  
  unsigned int sbitmap_weight(const struct sbitmap *sb)
  {
  	unsigned int i, weight = 0;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		const struct sbitmap_word *word = &sb->map[i];
  
  		weight += bitmap_weight(&word->word, word->depth);
  	}
  	return weight;
  }
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
  {
  	seq_printf(m, "depth=%u
  ", sb->depth);
  	seq_printf(m, "busy=%u
  ", sbitmap_weight(sb));
  	seq_printf(m, "bits_per_word=%u
  ", 1U << sb->shift);
  	seq_printf(m, "map_nr=%u
  ", sb->map_nr);
  }
  EXPORT_SYMBOL_GPL(sbitmap_show);
  
  static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
  {
  	if ((offset & 0xf) == 0) {
  		if (offset != 0)
			seq_putc(m, '\n');
  		seq_printf(m, "%08x:", offset);
  	}
  	if ((offset & 0x1) == 0)
  		seq_putc(m, ' ');
  	seq_printf(m, "%02x", byte);
  }
  
  void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
  {
  	u8 byte = 0;
  	unsigned int byte_bits = 0;
  	unsigned int offset = 0;
  	int i;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		unsigned long word = READ_ONCE(sb->map[i].word);
  		unsigned int word_bits = READ_ONCE(sb->map[i].depth);
  
  		while (word_bits > 0) {
  			unsigned int bits = min(8 - byte_bits, word_bits);
  
  			byte |= (word & (BIT(bits) - 1)) << byte_bits;
  			byte_bits += bits;
  			if (byte_bits == 8) {
  				emit_byte(m, offset, byte);
  				byte = 0;
  				byte_bits = 0;
  				offset++;
  			}
  			word >>= bits;
  			word_bits -= bits;
  		}
  	}
  	if (byte_bits) {
  		emit_byte(m, offset, byte);
  		offset++;
  	}
  	if (offset)
		seq_putc(m, '\n');
  }
  EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
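
/*
 * Example of the dump format above (a sketch): for depth = 24 with bits
 * 0-10 set, the single word 0x7ff is emitted least-significant byte first
 * as ff 07 00, so the seq_file output is
 *
 *	00000000: ff07 00
 *
 * an offset prefix every 16 bytes and a space every two bytes, in the
 * style of xxd.
 */
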
  static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
  					unsigned int depth)
  {
  	unsigned int wake_batch;
  	unsigned int shallow_depth;
  
  	/*
  	 * For each batch, we wake up one queue. We need to make sure that our
  	 * batch size is small enough that the full depth of the bitmap,
  	 * potentially limited by a shallow depth, is enough to wake up all of
  	 * the queues.
  	 *
  	 * Each full word of the bitmap has bits_per_word bits, and there might
  	 * be a partial word. There are depth / bits_per_word full words and
  	 * depth % bits_per_word bits left over. In bitwise arithmetic:
  	 *
  	 * bits_per_word = 1 << shift
  	 * depth / bits_per_word = depth >> shift
  	 * depth % bits_per_word = depth & ((1 << shift) - 1)
  	 *
  	 * Each word can be limited to sbq->min_shallow_depth bits.
  	 */
  	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
  	depth = ((depth >> sbq->sb.shift) * shallow_depth +
  		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
  	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
  			     SBQ_WAKE_BATCH);
  
  	return wake_batch;
  }
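
/*
 * Worked example (illustrative numbers): with depth = 256, shift = 6
 * (64 bits per word) and min_shallow_depth = 4, shallow_depth =
 * min(64, 4) = 4, so the usable depth becomes
 * (256 >> 6) * 4 + min(256 & 63, 4) = 16. With SBQ_WAIT_QUEUES = 8 that
 * gives wake_batch = clamp(16 / 8, 1, SBQ_WAKE_BATCH) = 2: each batch of
 * two freed bits wakes one wait queue, and sixteen frees are enough to
 * wake all eight queues.
 */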
  
  int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
  			    int shift, bool round_robin, gfp_t flags, int node)
  {
  	int ret;
  	int i;
  
  	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
  	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
  	if (!sbq->alloc_hint) {
  		sbitmap_free(&sbq->sb);
  		return -ENOMEM;
	}

	if (depth && !round_robin) {
  		for_each_possible_cpu(i)
  			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
  	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
  	if (!sbq->ws) {
  		free_percpu(sbq->alloc_hint);
  		sbitmap_free(&sbq->sb);
  		return -ENOMEM;
  	}
  
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		init_waitqueue_head(&sbq->ws[i].wait);
  		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
  	}
  
  	sbq->round_robin = round_robin;
  	return 0;
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
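
/*
 * Hypothetical end-to-end sketch (error handling abbreviated, names
 * illustrative):
 *
 *	struct sbitmap_queue sbq;
 *	int tag;
 *
 *	sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
 *				NUMA_NO_NODE);
 *	tag = __sbitmap_queue_get(&sbq);
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
 *	sbitmap_queue_free(&sbq);
 *
 * __sbitmap_queue_get() returns -1 when the map is full; callers that can
 * sleep typically wait on one of the sbq->ws wait queues, as blk-mq does.
 */
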
  static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
  					    unsigned int depth)
  {
  	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
  	int i;
  
  	if (sbq->wake_batch != wake_batch) {
  		WRITE_ONCE(sbq->wake_batch, wake_batch);
  		/*
  		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
  		 * to ensure that the batch size is updated before the wait
  		 * counts.
  		 */
  		smp_mb__before_atomic();
  		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
  			atomic_set(&sbq->ws[i].wait_cnt, 1);
  	}
  }
  
  void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
  {
  	sbitmap_queue_update_wake_batch(sbq, depth);
  	sbitmap_resize(&sbq->sb, depth);
  }
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
  {
  	unsigned int hint, depth;
  	int nr;
  
  	hint = this_cpu_read(*sbq->alloc_hint);
  	depth = READ_ONCE(sbq->sb.depth);
  	if (unlikely(hint >= depth)) {
  		hint = depth ? prandom_u32() % depth : 0;
  		this_cpu_write(*sbq->alloc_hint, hint);
  	}
  	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
  
  	if (nr == -1) {
  		/* If the map is full, a hint won't do us much good. */
  		this_cpu_write(*sbq->alloc_hint, 0);
  	} else if (nr == hint || unlikely(sbq->round_robin)) {
  		/* Only update the hint if we used it. */
  		hint = nr + 1;
  		if (hint >= depth - 1)
  			hint = 0;
  		this_cpu_write(*sbq->alloc_hint, hint);
  	}
  
  	return nr;
  }
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
  				unsigned int shallow_depth)
  {
  	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
  	depth = READ_ONCE(sbq->sb.depth);
  	if (unlikely(hint >= depth)) {
  		hint = depth ? prandom_u32() % depth : 0;
  		this_cpu_write(*sbq->alloc_hint, hint);
  	}
  	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
  
  	if (nr == -1) {
  		/* If the map is full, a hint won't do us much good. */
  		this_cpu_write(*sbq->alloc_hint, 0);
  	} else if (nr == hint || unlikely(sbq->round_robin)) {
  		/* Only update the hint if we used it. */
  		hint = nr + 1;
  		if (hint >= depth - 1)
  			hint = 0;
  		this_cpu_write(*sbq->alloc_hint, hint);
  	}
  
  	return nr;
  }
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
  				     unsigned int min_shallow_depth)
  {
  	sbq->min_shallow_depth = min_shallow_depth;
  	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
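
/*
 * Usage note: min_shallow_depth must be set to the smallest shallow depth
 * the user will ever pass to __sbitmap_queue_get_shallow(); the
 * WARN_ON_ONCE() there enforces this. Updating it here recomputes
 * wake_batch so that even shallow allocations free enough bits to wake
 * every wait queue.
 */
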
  static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
  {
  	int i, wake_index;
  
  	wake_index = atomic_read(&sbq->wake_index);
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		struct sbq_wait_state *ws = &sbq->ws[wake_index];
  
  		if (waitqueue_active(&ws->wait)) {
  			int o = atomic_read(&sbq->wake_index);
  
  			if (wake_index != o)
  				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
  			return ws;
  		}
  
  		wake_index = sbq_index_inc(wake_index);
  	}
  
  	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
  {
  	struct sbq_wait_state *ws;
  	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
  	if (!ws)
  		return false;
  
  	wait_cnt = atomic_dec_return(&ws->wait_cnt);
  	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

  		/*
  		 * Pairs with the memory barrier in sbitmap_queue_resize() to
  		 * ensure that we see the batch size update before the wait
  		 * count is reset.
  		 */
  		smp_mb__before_atomic();

  		/*
  		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
  		 */
  		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
  		if (ret == wait_cnt) {
  			sbq_index_atomic_inc(&sbq->wake_index);
  			wake_up_nr(&ws->wait, wake_batch);
  			return false;
  		}
  
  		return true;
  	}
  
  	return false;
  }
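
/*
 * Race walk-through: if two callers decrement ws->wait_cnt concurrently,
 * both can observe wait_cnt <= 0, but with different values. Only the
 * caller whose value still matches wins the atomic_cmpxchg() and issues
 * the wakeup; the loser returns true so that sbitmap_queue_wake_up()
 * below retries against the next wait queue rather than losing a wakeup.
 */
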
  void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
  {
  	while (__sbq_wake_up(sbq))
  		;
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

  void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
  			 unsigned int cpu)
  {
  	sbitmap_clear_bit_unlock(&sbq->sb, nr);
  	/*
  	 * Pairs with the memory barrier in set_current_state() to ensure the
  	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
  	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
  	 * waiter. See the comment on waitqueue_active().
  	 */
  	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
  		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
  
  void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
  {
  	int i, wake_index;
  
  	/*
  	 * Pairs with the memory barrier in set_current_state() like in
  	 * sbitmap_queue_wake_up().
  	 */
  	smp_mb();
  	wake_index = atomic_read(&sbq->wake_index);
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		struct sbq_wait_state *ws = &sbq->ws[wake_index];
  
  		if (waitqueue_active(&ws->wait))
  			wake_up(&ws->wait);
  
  		wake_index = sbq_index_inc(wake_index);
  	}
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
  
  void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
  {
  	bool first;
  	int i;
  
  	sbitmap_show(&sbq->sb, m);
  
  	seq_puts(m, "alloc_hint={");
  	first = true;
  	for_each_possible_cpu(i) {
  		if (!first)
  			seq_puts(m, ", ");
  		first = false;
  		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
  	}
  	seq_puts(m, "}
  ");
  
  	seq_printf(m, "wake_batch=%u
  ", sbq->wake_batch);
  	seq_printf(m, "wake_index=%d
  ", atomic_read(&sbq->wake_index));
  
  	seq_puts(m, "ws={
  ");
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		struct sbq_wait_state *ws = &sbq->ws[i];
  
  		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},
  ",
  			   atomic_read(&ws->wait_cnt),
  			   waitqueue_active(&ws->wait) ? "active" : "inactive");
  	}
  	seq_puts(m, "}
  ");
  
  	seq_printf(m, "round_robin=%d
  ", sbq->round_robin);
  	seq_printf(m, "min_shallow_depth=%u
  ", sbq->min_shallow_depth);
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_show);