lib/sbitmap.c

  /*
   * Copyright (C) 2016 Facebook
   * Copyright (C) 2013-2014 Jens Axboe
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public
   * License v2 as published by the Free Software Foundation.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   * General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

  #include <linux/random.h>
  #include <linux/sbitmap.h>
  
  int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
  		      gfp_t flags, int node)
  {
  	unsigned int bits_per_word;
  	unsigned int i;
  
  	if (shift < 0) {
  		shift = ilog2(BITS_PER_LONG);
  		/*
  		 * If the bitmap is small, shrink the number of bits per word so
  		 * we spread over a few cachelines, at least. If less than 4
  		 * bits, just forget about it, it's not going to work optimally
  		 * anyway.
  		 */
  		if (depth >= 4) {
  			while ((4U << shift) > depth)
  				shift--;
  		}
  	}
  	bits_per_word = 1U << shift;
  	if (bits_per_word > BITS_PER_LONG)
  		return -EINVAL;
  
  	sb->shift = shift;
  	sb->depth = depth;
  	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
  
  	if (depth == 0) {
  		sb->map = NULL;
  		return 0;
  	}
  
  	sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
  	if (!sb->map)
  		return -ENOMEM;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		sb->map[i].depth = min(depth, bits_per_word);
  		depth -= sb->map[i].depth;
  	}
  	return 0;
  }
  EXPORT_SYMBOL_GPL(sbitmap_init_node);
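
/*
 * Illustrative sketch, not part of the original file: initializing a
 * 128-bit map with the default shift on no particular NUMA node, then
 * releasing it with sbitmap_free() from <linux/sbitmap.h>.
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_free(&sb);
 */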
  
  void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
  {
  	unsigned int bits_per_word = 1U << sb->shift;
  	unsigned int i;
  
  	sb->depth = depth;
  	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
  
  	for (i = 0; i < sb->map_nr; i++) {
  		sb->map[i].depth = min(depth, bits_per_word);
  		depth -= sb->map[i].depth;
  	}
  }
  EXPORT_SYMBOL_GPL(sbitmap_resize);
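
/*
 * Illustrative sketch: shrinking the map above from 128 to 64 bits. Note
 * that sbitmap_resize() never reallocates sb->map, so the new depth must
 * not exceed the depth the map was initialized with.
 *
 *	sbitmap_resize(&sb, 64);
 */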
  
  static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
  			      bool wrap)
  {
  	unsigned int orig_hint = hint;
  	int nr;
  
  	while (1) {
  		nr = find_next_zero_bit(&word->word, word->depth, hint);
  		if (unlikely(nr >= word->depth)) {
  			/*
  			 * We started with an offset, and we didn't reset the
  			 * offset to 0 in a failure case, so start from 0 to
  			 * exhaust the map.
  			 */
  			if (orig_hint && hint && wrap) {
  				hint = orig_hint = 0;
  				continue;
  			}
  			return -1;
  		}
  
  		if (!test_and_set_bit(nr, &word->word))
  			break;
  
  		hint = nr + 1;
  		if (hint >= word->depth - 1)
  			hint = 0;
  	}
  
  	return nr;
  }
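
/*
 * Worked example of the wrap handling above: with word->depth == 32,
 * hint == 20, and bits 20..31 all set, find_next_zero_bit() returns 32,
 * so when wrap is true the search restarts at bit 0 and can still claim
 * a free bit below the original hint before giving up with -1.
 */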
  
  int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
  {
  	unsigned int i, index;
  	int nr = -1;
  
  	index = SB_NR_TO_INDEX(sb, alloc_hint);
  
  	for (i = 0; i < sb->map_nr; i++) {
  		nr = __sbitmap_get_word(&sb->map[index],
  					SB_NR_TO_BIT(sb, alloc_hint),
  					!round_robin);
  		if (nr != -1) {
  			nr += index << sb->shift;
  			break;
  		}
  
  		/* Jump to next index. */
  		index++;
  		alloc_hint = index << sb->shift;
  
  		if (index >= sb->map_nr) {
  			index = 0;
  			alloc_hint = 0;
  		}
  	}
  
  	return nr;
  }
  EXPORT_SYMBOL_GPL(sbitmap_get);
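
/*
 * Illustrative sketch, not part of the original file: allocate a free
 * bit starting from hint 0, then release it with sbitmap_clear_bit()
 * from <linux/sbitmap.h>.
 *
 *	int nr = sbitmap_get(&sb, 0, false);
 *
 *	if (nr >= 0) {
 *		... use bit nr ...
 *		sbitmap_clear_bit(&sb, nr);
 *	}
 */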
  
  bool sbitmap_any_bit_set(const struct sbitmap *sb)
  {
  	unsigned int i;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		if (sb->map[i].word)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
  
  bool sbitmap_any_bit_clear(const struct sbitmap *sb)
  {
  	unsigned int i;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		const struct sbitmap_word *word = &sb->map[i];
  		unsigned long ret;
  
  		ret = find_first_zero_bit(&word->word, word->depth);
  		if (ret < word->depth)
  			return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
  
  unsigned int sbitmap_weight(const struct sbitmap *sb)
  {
  	unsigned int i, weight = 0;
  
  	for (i = 0; i < sb->map_nr; i++) {
  		const struct sbitmap_word *word = &sb->map[i];
  
  		weight += bitmap_weight(&word->word, word->depth);
  	}
  	return weight;
  }
  EXPORT_SYMBOL_GPL(sbitmap_weight);
  
  static unsigned int sbq_calc_wake_batch(unsigned int depth)
  {
  	unsigned int wake_batch;
  
  	/*
  	 * For each batch, we wake up one queue. We need to make sure that our
  	 * batch size is small enough that the full depth of the bitmap is
  	 * enough to wake up all of the queues.
  	 */
  	wake_batch = SBQ_WAKE_BATCH;
  	if (wake_batch > depth / SBQ_WAIT_QUEUES)
  		wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);
  
  	return wake_batch;
  }
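
/*
 * Worked example, assuming the values in <linux/sbitmap.h>
 * (SBQ_WAKE_BATCH == 8, SBQ_WAIT_QUEUES == 8):
 *
 *	depth = 256: 256 / 8 = 32 >= 8, so wake_batch stays 8
 *	depth =  32:  32 / 8 =  4 <  8, so wake_batch = max(1, 4) = 4
 *	depth =   4:   4 / 8 =  0 <  8, so wake_batch = max(1, 0) = 1
 */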
  
  int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
  			    int shift, bool round_robin, gfp_t flags, int node)
  {
  	int ret;
  	int i;
  
  	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
  	if (ret)
	return ret;

  	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
  	if (!sbq->alloc_hint) {
  		sbitmap_free(&sbq->sb);
  		return -ENOMEM;
	}

  	if (depth && !round_robin) {
  		for_each_possible_cpu(i)
  			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

  	sbq->wake_batch = sbq_calc_wake_batch(depth);
	atomic_set(&sbq->wake_index, 0);

  	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
  	if (!sbq->ws) {
  		free_percpu(sbq->alloc_hint);
  		sbitmap_free(&sbq->sb);
  		return -ENOMEM;
  	}
  
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		init_waitqueue_head(&sbq->ws[i].wait);
  		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
  	}
  
  	sbq->round_robin = round_robin;
  	return 0;
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
  
  void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
  {
  	sbq->wake_batch = sbq_calc_wake_batch(depth);
  	sbitmap_resize(&sbq->sb, depth);
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
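
/*
 * Illustrative sketch, not part of the original file: a queue of 64 tags
 * with the default shift and non-round-robin allocation, torn down with
 * sbitmap_queue_free() from <linux/sbitmap.h>.
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&sbq);
 */
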
  int __sbitmap_queue_get(struct sbitmap_queue *sbq)
  {
  	unsigned int hint, depth;
  	int nr;
  
  	hint = this_cpu_read(*sbq->alloc_hint);
  	depth = READ_ONCE(sbq->sb.depth);
  	if (unlikely(hint >= depth)) {
  		hint = depth ? prandom_u32() % depth : 0;
  		this_cpu_write(*sbq->alloc_hint, hint);
  	}
  	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
  
  	if (nr == -1) {
  		/* If the map is full, a hint won't do us much good. */
  		this_cpu_write(*sbq->alloc_hint, 0);
  	} else if (nr == hint || unlikely(sbq->round_robin)) {
  		/* Only update the hint if we used it. */
  		hint = nr + 1;
  		if (hint >= depth - 1)
  			hint = 0;
  		this_cpu_write(*sbq->alloc_hint, hint);
  	}
  
  	return nr;
  }
  EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
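
/*
 * Illustrative caller pattern, a sketch in the style of blk-mq's tag
 * allocator rather than code from this file: block until a bit is free.
 * sbq_wait_ptr() comes from <linux/sbitmap.h>; wait_index is a
 * hypothetical per-caller atomic_t cursor into the wait queues.
 *
 *	DEFINE_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	int nr;
 *
 *	for (;;) {
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		ws = sbq_wait_ptr(sbq, &wait_index);
 *		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr < 0)
 *			io_schedule();
 *		finish_wait(&ws->wait, &wait);
 *		if (nr >= 0)
 *			break;
 *	}
 */
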
  static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
  {
  	int i, wake_index;
  
  	wake_index = atomic_read(&sbq->wake_index);
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		struct sbq_wait_state *ws = &sbq->ws[wake_index];
  
  		if (waitqueue_active(&ws->wait)) {
  			int o = atomic_read(&sbq->wake_index);
  
  			if (wake_index != o)
  				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
  			return ws;
  		}
  
  		wake_index = sbq_index_inc(wake_index);
  	}
  
  	return NULL;
  }
  
  static void sbq_wake_up(struct sbitmap_queue *sbq)
  {
  	struct sbq_wait_state *ws;
  	int wait_cnt;
  
  	/* Ensure that the wait list checks occur after clear_bit(). */
  	smp_mb();
  
  	ws = sbq_wake_ptr(sbq);
  	if (!ws)
  		return;
  
  	wait_cnt = atomic_dec_return(&ws->wait_cnt);
  	if (unlikely(wait_cnt < 0))
  		wait_cnt = atomic_inc_return(&ws->wait_cnt);
  	if (wait_cnt == 0) {
  		atomic_add(sbq->wake_batch, &ws->wait_cnt);
  		sbq_index_atomic_inc(&sbq->wake_index);
  		wake_up(&ws->wait);
  	}
}

  void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
  			 unsigned int cpu)
  {
  	sbitmap_clear_bit(&sbq->sb, nr);
  	sbq_wake_up(sbq);
  	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
  		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
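
/*
 * Illustrative pairing with __sbitmap_queue_get() above (a sketch): free
 * tag nr on the CPU it was allocated on, so that CPU reuses it as its
 * next allocation hint.
 *
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */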
  
  void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
  {
  	int i, wake_index;
  
  	/*
  	 * Make sure all changes prior to this are visible from other CPUs.
  	 */
  	smp_mb();
  	wake_index = atomic_read(&sbq->wake_index);
  	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
  		struct sbq_wait_state *ws = &sbq->ws[wake_index];
  
  		if (waitqueue_active(&ws->wait))
  			wake_up(&ws->wait);
  
  		wake_index = sbq_index_inc(wake_index);
  	}
  }
  EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
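
/*
 * Illustrative use (a sketch, mirroring how blk-mq updates tag depths):
 * after growing a queue, wake every wait queue so sleepers observe the
 * new wake_batch and retry against the enlarged map.
 *
 *	sbitmap_queue_resize(sbq, new_depth);
 *	sbitmap_queue_wake_all(sbq);
 */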