Blame view

block/bio.c 44 KB
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
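
/*
 * Illustrative sketch (not part of the original file): a common way to pair
 * bio_init() with bio_uninit() is an on-stack bio with one inline bio_vec,
 * completed synchronously. example_sync_read() and its bdev/page/sector
 * parameters are hypothetical names used only for this example.
 */
static int example_sync_read(struct block_device *bdev, struct page *page,
			     sector_t sector)
{
	struct bio bio;
	struct bio_vec bvec;
	int ret;

	bio_init(&bio, &bvec, 1);		/* caller owns the biovec table */
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = sector;
	bio.bi_opf = REQ_OP_READ;
	bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);		/* synchronous completion */
	bio_uninit(&bio);			/* pairs with bio_init() */
	return ret;
}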

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
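
/*
 * Illustrative sketch (not part of the original file): a typical bio_chain()
 * caller splits off the front of an oversized bio, chains it to the remainder,
 * requeues the remainder, and keeps working on the split piece. The original
 * bi_end_io then fires only once both halves complete. example_split_front()
 * and its parameters are hypothetical.
 */
static struct bio *example_split_front(struct bio *bio, int sectors,
				       struct bio_set *bs)
{
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

		bio_chain(split, bio);		/* bio's end_io now also waits for split */
		submit_bio_noacct(bio);		/* requeue the remainder */
		bio = split;			/* caller continues with the front piece */
	}
	return bio;
}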

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under submit_bio_noacct() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   submit_bio_noacct() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   submit_bio_noacct() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * submit_bio_noacct() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath submit_bio_noacct(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */
		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
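
/*
 * Illustrative sketch (not part of the original file): allocating a bio from
 * fs_bio_set with bio_alloc_bioset() (bio_alloc() is the usual shorthand for
 * exactly this), filling in the target and one page, and submitting it.
 * example_submit_one_page(), my_end_io and the parameters are hypothetical.
 */
static void example_submit_one_page(struct block_device *bdev, struct page *page,
				    sector_t sector, bio_end_io_t *my_end_io)
{
	struct bio *bio;

	/* GFP_NOIO plus a mempool-backed bio_set: this allocation won't fail */
	bio = bio_alloc_bioset(GFP_NOIO, 1, &fs_bio_set);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_end_io;	/* completion handler should bio_put() */
	submit_bio(bio);
}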

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to a smaller size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, offset, bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector;
	struct hd_struct *part;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = part_nr_sects_read(part);
	else
		maxsector = get_capacity(bio->bi_disk);
	rcu_read_unlock();

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 * 	bio will be one.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 * 	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);

const char *bio_devname(struct bio *bio, char *buf)
{
	return disk_name(bio->bi_disk, bio->bi_partno, buf);
}
EXPORT_SYMBOL(bio_devname);

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
				 struct page *page, unsigned len,
				 unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page	- attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;

	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 *	bio_add_page	-	attempt to add page(s) to bio
 *	@bio: destination bio
 *	@page: start page to add
 *	@len: vec entry length, may cross pages
 *	@offset: vec entry offset relative to @page, may cross pages
 *
 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
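
/*
 * Illustrative sketch (not part of the original file): filling a bio page by
 * page with bio_add_page(), which returns 0 once the bio is full
 * (bi_vcnt == bi_max_vecs), then submitting synchronously.
 * example_write_pages() and its parameters are hypothetical.
 */
static int example_write_pages(struct block_device *bdev, struct page **pages,
			       unsigned int nr_pages, sector_t sector)
{
	struct bio *bio;
	unsigned int i;
	int ret;

	bio = bio_alloc(GFP_NOIO, min_t(unsigned int, nr_pages, BIO_MAX_PAGES));
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;

	for (i = 0; i < nr_pages; i++) {
		/* returns 0 once the bio cannot take another segment */
		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0))
			break;
	}

	ret = submit_bio_wait(bio);	/* write whatever fit, synchronously */
	bio_put(bio);
	return ret;
}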

void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(bio_release_pages);

static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
	const struct bio_vec *bv = iter->bvec;
	unsigned int len;
	size_t size;

	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
		return -EINVAL;

	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
	size = bio_add_page(bio, bv->bv_page, len,
				bv->bv_offset + iter->iov_offset);
	if (unlikely(size != len))
		return -EINVAL;
	iov_iter_advance(iter, size);
	return 0;
}
576ed9135   Christoph Hellwig   block: use bio_ad...
942
  #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
2cefe4dba   Kent Overstreet   block: add bio_io...
943
  /**
17d51b10d   Martin Wilck   block: bio_iov_it...
944
   * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
2cefe4dba   Kent Overstreet   block: add bio_io...
945
946
947
   * @bio: bio to add pages to
   * @iter: iov iterator describing the region to be mapped
   *
17d51b10d   Martin Wilck   block: bio_iov_it...
948
   * Pins pages from *iter and appends them to @bio's bvec array. The
2cefe4dba   Kent Overstreet   block: add bio_io...
949
   * pages will have to be released using put_page() when done.
17d51b10d   Martin Wilck   block: bio_iov_it...
950
   * For multi-segment *iter, this function only adds pages from the
3cf148891   Randy Dunlap   block: bio: delet...
951
   * next non-empty segment of the iov iterator.
2cefe4dba   Kent Overstreet   block: add bio_io...
952
   */
17d51b10d   Martin Wilck   block: bio_iov_it...
953
  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
2cefe4dba   Kent Overstreet   block: add bio_io...
954
  {
576ed9135   Christoph Hellwig   block: use bio_ad...
955
956
  	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
2cefe4dba   Kent Overstreet   block: add bio_io...
957
958
  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  	struct page **pages = (struct page **)bv;
456918049   Christoph Hellwig   block: fix page l...
959
  	bool same_page = false;
576ed9135   Christoph Hellwig   block: use bio_ad...
960
961
  	ssize_t size, left;
  	unsigned len, i;
b403ea240   Martin Wilck   block: bio_iov_it...
962
  	size_t offset;
576ed9135   Christoph Hellwig   block: use bio_ad...
963
964
965
966
967
968
969
970
  
  	/*
  	 * Move page array up in the allocated memory for the bio vecs as far as
  	 * possible so that we can start filling biovecs from the beginning
  	 * without overwriting the temporary page array.
  	*/
  	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
  	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
2cefe4dba   Kent Overstreet   block: add bio_io...
971
972
973
974
  
  	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
  	if (unlikely(size <= 0))
  		return size ? size : -EFAULT;
2cefe4dba   Kent Overstreet   block: add bio_io...
975

576ed9135   Christoph Hellwig   block: use bio_ad...
976
977
  	for (left = size, i = 0; left > 0; left -= len, i++) {
  		struct page *page = pages[i];
2cefe4dba   Kent Overstreet   block: add bio_io...
978

576ed9135   Christoph Hellwig   block: use bio_ad...
979
  		len = min_t(size_t, PAGE_SIZE - offset, left);
456918049   Christoph Hellwig   block: fix page l...
980
981
982
983
984
  
  		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  			if (same_page)
  				put_page(page);
  		} else {
79d08f89b   Ming Lei   block: fix .bi_si...
985
  			if (WARN_ON_ONCE(bio_full(bio, len)))
456918049   Christoph Hellwig   block: fix page l...
986
987
988
                                  return -EINVAL;
  			__bio_add_page(bio, page, len, offset);
  		}
576ed9135   Christoph Hellwig   block: use bio_ad...
989
  		offset = 0;
2cefe4dba   Kent Overstreet   block: add bio_io...
990
  	}
2cefe4dba   Kent Overstreet   block: add bio_io...
991
992
993
  	iov_iter_advance(iter, size);
  	return 0;
  }
17d51b10d   Martin Wilck   block: bio_iov_it...
994

0512a75b9   Keith Busch   block: Introduce ...
995
996
997
998
999
1000
1001
1002
1003
1004
1005
  static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
  {
  	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
  	struct request_queue *q = bio->bi_disk->queue;
  	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  	struct page **pages = (struct page **)bv;
  	ssize_t size, left;
  	unsigned len, i;
  	size_t offset;
4977d121b   Naohiro Aota   block: advance io...
1006
  	int ret = 0;
0512a75b9   Keith Busch   block: Introduce ...
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
  
  	if (WARN_ON_ONCE(!max_append_sectors))
  		return 0;
  
  	/*
  	 * Move page array up in the allocated memory for the bio vecs as far as
  	 * possible so that we can start filling biovecs from the beginning
  	 * without overwriting the temporary page array.
  	 */
  	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
  	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
  
  	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
  	if (unlikely(size <= 0))
  		return size ? size : -EFAULT;
  
  	for (left = size, i = 0; left > 0; left -= len, i++) {
  		struct page *page = pages[i];
  		bool same_page = false;
  
  		len = min_t(size_t, PAGE_SIZE - offset, left);
  		if (bio_add_hw_page(q, bio, page, len, offset,
4977d121b   Naohiro Aota   block: advance io...
1029
1030
1031
1032
  				max_append_sectors, &same_page) != len) {
  			ret = -EINVAL;
  			break;
  		}
0512a75b9   Keith Busch   block: Introduce ...
1033
1034
1035
1036
  		if (same_page)
  			put_page(page);
  		offset = 0;
  	}
4977d121b   Naohiro Aota   block: advance io...
1037
1038
  	iov_iter_advance(iter, size - left);
  	return ret;
0512a75b9   Keith Busch   block: Introduce ...
1039
  }
17d51b10d   Martin Wilck   block: bio_iov_it...
1040
  /**
6d0c48aed   Jens Axboe   block: implement ...
1041
   * bio_iov_iter_get_pages - add user or kernel pages to a bio
17d51b10d   Martin Wilck   block: bio_iov_it...
1042
   * @bio: bio to add pages to
6d0c48aed   Jens Axboe   block: implement ...
1043
1044
1045
1046
1047
   * @iter: iov iterator describing the region to be added
   *
   * This takes either an iterator pointing to user memory, or one pointing to
   * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
   * map them into the kernel. On IO completion, the caller should put those
399254aaf   Jens Axboe   block: add BIO_NO...
1048
1049
1050
1051
1052
1053
   * pages. If we're adding kernel pages, and the caller told us it's safe to
   * do so, we just have to add the pages to the bio directly. We don't grab an
   * extra reference to those pages (the user should already have that), and we
   * don't put the page on IO completion. The caller needs to check if the bio is
   * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
   * released.
17d51b10d   Martin Wilck   block: bio_iov_it...
1054
   *
17d51b10d   Martin Wilck   block: bio_iov_it...
1055
   * The function tries, but does not guarantee, to pin as many pages as
5cd3ddc18   Mauro Carvalho Chehab   docs: bio: fix a ...
1056
   * fit into the bio, or are requested in @iter, whatever is smaller. If
6d0c48aed   Jens Axboe   block: implement ...
1057
1058
   * MM encounters an error pinning the requested pages, it stops. Error
   * is returned only if 0 pages could be pinned.
17d51b10d   Martin Wilck   block: bio_iov_it...
1059
1060
1061
   */
  int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  {
  	const bool is_bvec = iov_iter_is_bvec(iter);
  	int ret;
  
  	if (WARN_ON_ONCE(bio->bi_vcnt))
  		return -EINVAL;
  
  	do {
  		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
  			if (WARN_ON_ONCE(is_bvec))
  				return -EINVAL;
  			ret = __bio_iov_append_get_pages(bio, iter);
  		} else {
  			if (is_bvec)
  				ret = __bio_iov_bvec_add_pages(bio, iter);
  			else
  				ret = __bio_iov_iter_get_pages(bio, iter);
  		}
  	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

  	if (is_bvec)
  		bio_set_flag(bio, BIO_NO_PAGE_REF);
  	return bio->bi_vcnt ? 0 : ret;
  }
  EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
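
  /*
   * Editorial example (a sketch, not part of the kernel tree): a direct I/O
   * path might drive bio_iov_iter_get_pages() roughly like this; "bdev",
   * "pos", "iter" and "nr_pages" are placeholders for the caller's state.
   *
   *	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
   *	bio = bio_alloc(GFP_KERNEL, nr_pages);
   *	bio_set_dev(bio, bdev);
   *	bio->bi_iter.bi_sector = pos >> 9;
   *	bio->bi_opf = REQ_OP_READ;
   *	ret = bio_iov_iter_get_pages(bio, iter);
   *
   * On completion the pages are dropped with bio_release_pages(), which backs
   * off when BIO_NO_PAGE_REF is set, so the BVEC case needs no special
   * handling there.
   */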

  static void submit_bio_wait_endio(struct bio *bio)
  {
  	complete(bio->bi_private);
  }
  
  /**
   * submit_bio_wait - submit a bio, and wait until it completes
   * @bio: The &struct bio which describes the I/O
   *
   * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
   * bio_endio() on failure.
   *
   * WARNING: Unlike how submit_bio() is usually used, this function does not
   * consume a reference to the bio. The caller must drop that reference on
   * their own.
   */
  int submit_bio_wait(struct bio *bio)
  {
  	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
  	unsigned long hang_check;

  	bio->bi_private = &done;
  	bio->bi_end_io = submit_bio_wait_endio;
  	bio->bi_opf |= REQ_SYNC;
  	submit_bio(bio);
  
  	/* Prevent hang_check timer from firing at us during very long I/O */
  	hang_check = sysctl_hung_task_timeout_secs;
  	if (hang_check)
  		while (!wait_for_completion_io_timeout(&done,
  					hang_check * (HZ/2)))
  			;
  	else
  		wait_for_completion_io(&done);

  	return blk_status_to_errno(bio->bi_status);
  }
  EXPORT_SYMBOL(submit_bio_wait);
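
  /*
   * Editorial example (a sketch, not part of the kernel tree): synchronously
   * reading one page from "bdev" at "sector"; both names, and "page", are
   * placeholders supplied by the caller.
   *
   *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
   *
   *	bio_set_dev(bio, bdev);
   *	bio->bi_iter.bi_sector = sector;
   *	bio->bi_opf = REQ_OP_READ;
   *	bio_add_page(bio, page, PAGE_SIZE, 0);
   *	ret = submit_bio_wait(bio);
   *	bio_put(bio);
   *
   * Note the explicit bio_put(): as documented above, submit_bio_wait() does
   * not consume the caller's reference.
   */
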
  /**
   * bio_advance - increment/complete a bio by some number of bytes
   * @bio:	bio to advance
   * @bytes:	number of bytes to complete
   *
   * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
   * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
   * be updated on the last bvec as well.
   *
   * @bio will then represent the remaining, uncompleted portion of the io.
   */
  void bio_advance(struct bio *bio, unsigned bytes)
  {
  	if (bio_integrity(bio))
  		bio_integrity_advance(bio, bytes);
  	bio_crypt_advance(bio, bytes);
  	bio_advance_iter(bio, &bio->bi_iter, bytes);
  }
  EXPORT_SYMBOL(bio_advance);
  void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
  			struct bio *src, struct bvec_iter *src_iter)
  {
  	struct bio_vec src_bv, dst_bv;
  	void *src_p, *dst_p;
  	unsigned bytes;

  	while (src_iter->bi_size && dst_iter->bi_size) {
  		src_bv = bio_iter_iovec(src, *src_iter);
  		dst_bv = bio_iter_iovec(dst, *dst_iter);
  
  		bytes = min(src_bv.bv_len, dst_bv.bv_len);

  		src_p = kmap_atomic(src_bv.bv_page);
  		dst_p = kmap_atomic(dst_bv.bv_page);

  		memcpy(dst_p + dst_bv.bv_offset,
  		       src_p + src_bv.bv_offset,
  		       bytes);
  
  		kunmap_atomic(dst_p);
  		kunmap_atomic(src_p);
  		flush_dcache_page(dst_bv.bv_page);
  		bio_advance_iter(src, src_iter, bytes);
  		bio_advance_iter(dst, dst_iter, bytes);
  	}
  }
  EXPORT_SYMBOL(bio_copy_data_iter);
  
  /**
   * bio_copy_data - copy contents of data buffers from one bio to another
   * @src: source bio
   * @dst: destination bio
   *
   * Stops when it reaches the end of either @src or @dst - that is, copies
   * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
   */
  void bio_copy_data(struct bio *dst, struct bio *src)
  {
  	struct bvec_iter src_iter = src->bi_iter;
  	struct bvec_iter dst_iter = dst->bi_iter;
  
  	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  }
  EXPORT_SYMBOL(bio_copy_data);
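
  /*
   * Editorial example (a sketch, not part of the kernel tree): a driver that
   * bounces a WRITE into its own pages before submission could snapshot the
   * payload like this, assuming "bounce" is a bio it allocated and populated
   * with private pages covering at least src->bi_iter.bi_size bytes:
   *
   *	bio_copy_data(bounce, src);
   *	submit_bio(bounce);
   *
   * Only min(src->bi_iter.bi_size, bounce->bi_iter.bi_size) bytes are copied,
   * as described above.
   */
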
  /**
   * bio_list_copy_data - copy contents of data buffers from one chain of bios to
   * another
   * @src: source bio list
   * @dst: destination bio list
   *
   * Stops when it reaches the end of either the @src list or @dst list - that is,
   * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
   * bios).
   */
  void bio_list_copy_data(struct bio *dst, struct bio *src)
  {
  	struct bvec_iter src_iter = src->bi_iter;
  	struct bvec_iter dst_iter = dst->bi_iter;
  
  	while (1) {
  		if (!src_iter.bi_size) {
  			src = src->bi_next;
  			if (!src)
  				break;
  
  			src_iter = src->bi_iter;
  		}
  
  		if (!dst_iter.bi_size) {
  			dst = dst->bi_next;
  			if (!dst)
  				break;
  
  			dst_iter = dst->bi_iter;
  		}
  
  		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  	}
  }
  EXPORT_SYMBOL(bio_list_copy_data);
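
  /*
   * Editorial example (a sketch, not part of the kernel tree): the list
   * variant walks ->bi_next on both sides, so two lists of bios linked
   * through ->bi_next can be copied in one call, assuming "dst_head" and
   * "src_head" are the first bios of each list:
   *
   *	bio_list_copy_data(dst_head, src_head);
   */
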
  void bio_free_pages(struct bio *bio)
  {
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all)
  		__free_page(bvec->bv_page);
  }
  EXPORT_SYMBOL(bio_free_pages);

  /*
   * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
   * for performing direct-IO in BIOs.
   *
   * The problem is that we cannot run set_page_dirty() from interrupt context
   * because the required locks are not interrupt-safe.  So what we can do is to
   * mark the pages dirty _before_ performing IO.  And in interrupt context,
   * check that the pages are still dirty.   If so, fine.  If not, redirty them
   * in process context.
   *
   * We special-case compound pages here: normally this means reads into hugetlb
   * pages.  The logic in here doesn't really work right for compound pages
   * because the VM does not uniformly chase down the head page in all cases.
   * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
   * handle them at all.  So we skip compound pages here at an early stage.
   *
   * Note that this code is very hard to test under normal circumstances because
   * direct-io pins the pages with get_user_pages().  This makes
   * is_page_cache_freeable return false, and the VM will not clean the pages.
   * But other code (eg, flusher threads) could clean the pages if they are mapped
   * pagecache.
   *
   * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
   * deferred bio dirtying paths.
   */
  
  /*
   * bio_set_pages_dirty() will mark all the bio's pages as dirty.
   */
  void bio_set_pages_dirty(struct bio *bio)
  {
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		if (!PageCompound(bvec->bv_page))
  			set_page_dirty_lock(bvec->bv_page);
  	}
  }
  /*
   * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
   * If they are, then fine.  If, however, some pages are clean then they must
   * have been written out during the direct-IO read.  So we take another ref on
   * the BIO and re-dirty the pages in process context.
   *
   * It is expected that bio_check_pages_dirty() will wholly own the BIO from
   * here on.  It will run one put_page() against each page and will run one
   * bio_put() against the BIO.
   */
  static void bio_dirty_fn(struct work_struct *work);

  static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
  static DEFINE_SPINLOCK(bio_dirty_lock);
  static struct bio *bio_dirty_list;
  
  /*
   * This runs in process context
   */
  static void bio_dirty_fn(struct work_struct *work)
  {
  	struct bio *bio, *next;

  	spin_lock_irq(&bio_dirty_lock);
  	next = bio_dirty_list;
  	bio_dirty_list = NULL;
  	spin_unlock_irq(&bio_dirty_lock);

  	while ((bio = next) != NULL) {
  		next = bio->bi_private;

  		bio_release_pages(bio, true);
  		bio_put(bio);
  	}
  }
  
  void bio_check_pages_dirty(struct bio *bio)
  {
  	struct bio_vec *bvec;
  	unsigned long flags;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
  			goto defer;
  	}
  	bio_release_pages(bio, false);
  	bio_put(bio);
  	return;
  defer:
  	spin_lock_irqsave(&bio_dirty_lock, flags);
  	bio->bi_private = bio_dirty_list;
  	bio_dirty_list = bio;
  	spin_unlock_irqrestore(&bio_dirty_lock, flags);
  	schedule_work(&bio_dirty_work);
  }
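
  /*
   * Editorial example (a sketch, not part of the kernel tree): the usual
   * direct-IO READ pattern pairing the two helpers above. Pages are dirtied
   * up front in process context, and the completion path re-checks (and, if
   * needed, re-dirties) them:
   *
   *	bio_set_pages_dirty(bio);
   *	submit_bio(bio);
   *
   *	// later, in the READ completion path:
   *	bio_check_pages_dirty(bio);	// releases the pages and puts the bio
   */
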
  static inline bool bio_remaining_done(struct bio *bio)
  {
  	/*
  	 * If we're not chaining, then ->__bi_remaining is always 1 and
  	 * we always end io on the first invocation.
  	 */
  	if (!bio_flagged(bio, BIO_CHAIN))
  		return true;
  
  	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
  	if (atomic_dec_and_test(&bio->__bi_remaining)) {
  		bio_clear_flag(bio, BIO_CHAIN);
  		return true;
  	}
  
  	return false;
  }
  /**
   * bio_endio - end I/O on a bio
   * @bio:	bio
   *
   * Description:
   *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
   *   way to end I/O on a bio. No one should call bi_end_io() directly on a
   *   bio unless they own it and thus know that it has an end_io function.
   *
   *   bio_endio() can be called several times on a bio that has been chained
   *   using bio_chain().  The ->bi_end_io() function will only be called the
   *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
   *   generated if BIO_TRACE_COMPLETION is set.
   **/
  void bio_endio(struct bio *bio)
  {
  again:
  	if (!bio_remaining_done(bio))
  		return;
  	if (!bio_integrity_endio(bio))
  		return;

  	if (bio->bi_disk)
  		rq_qos_done_bio(bio->bi_disk->queue, bio);
  	/*
  	 * Need to have a real endio function for chained bios, otherwise
  	 * various corner cases will break (like stacking block devices that
  	 * save/restore bi_end_io) - however, we want to avoid unbounded
  	 * recursion and blowing the stack. Tail call optimization would
  	 * handle this, but compiling with frame pointers also disables
  	 * gcc's sibling call optimization.
  	 */
  	if (bio->bi_end_io == bio_chain_endio) {
  		bio = __bio_chain_endio(bio);
  		goto again;
  	}

  	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
  		trace_block_bio_complete(bio->bi_disk->queue, bio);
  		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
  	}
  	blk_throtl_bio_endio(bio);
  	/* release cgroup info */
  	bio_uninit(bio);
  	if (bio->bi_end_io)
  		bio->bi_end_io(bio);
  }
  EXPORT_SYMBOL(bio_endio);
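
  /*
   * Editorial example (a sketch, not part of the kernel tree): a stacking
   * driver that issues extra I/O on behalf of a bio it owns can lean on the
   * chaining behaviour described above; "parent" is the original bio, "new"
   * a bio the driver allocated for its own work:
   *
   *	bio_chain(new, parent);
   *	submit_bio(new);
   *	...
   *	bio_endio(parent);	// completes only once "new" has completed too
   */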

  /**
   * bio_split - split a bio
   * @bio:	bio to split
   * @sectors:	number of sectors to split from the front of @bio
   * @gfp:	gfp mask
   * @bs:		bio set to allocate from
   *
   * Allocates and returns a new bio which represents @sectors from the start of
   * @bio, and updates @bio to represent the remaining sectors.
   *
   * Unless this is a discard request the newly allocated bio will point
   * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
   * neither @bio nor @bs are freed before the split bio.
   */
  struct bio *bio_split(struct bio *bio, int sectors,
  		      gfp_t gfp, struct bio_set *bs)
  {
  	struct bio *split;
  
  	BUG_ON(sectors <= 0);
  	BUG_ON(sectors >= bio_sectors(bio));
  	/* Zone append commands cannot be split */
  	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
  		return NULL;
  	split = bio_clone_fast(bio, gfp, bs);
  	if (!split)
  		return NULL;
  
  	split->bi_iter.bi_size = sectors << 9;
  
  	if (bio_integrity(split))
  		bio_integrity_trim(split);
  
  	bio_advance(bio, split->bi_iter.bi_size);
  	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
  		bio_set_flag(split, BIO_TRACE_COMPLETION);

  	return split;
  }
  EXPORT_SYMBOL(bio_split);
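
  /*
   * Editorial example (a sketch, not part of the kernel tree): splitting an
   * oversized bio and carrying on with the front part, in the style of the
   * block layer's own splitting code; "max_sectors" and "bs" stand for
   * whatever limit and bio_set the caller uses:
   *
   *	split = bio_split(bio, max_sectors, GFP_NOIO, bs);
   *	bio_chain(split, bio);
   *	submit_bio(bio);	// re-queue the remainder
   *	bio = split;		// continue with the front part
   */
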
  /**
   * bio_trim - trim a bio
   * @bio:	bio to trim
   * @offset:	number of sectors to trim from the front of @bio
   * @size:	size we want to trim @bio to, in sectors
   */
  void bio_trim(struct bio *bio, int offset, int size)
  {
  	/* 'bio' is a cloned bio which we need to trim to match
  	 * the given offset and size.
  	 */
  
  	size <<= 9;
  	if (offset == 0 && size == bio->bi_iter.bi_size)
  		return;
  	bio_advance(bio, offset << 9);
  	bio->bi_iter.bi_size = size;
  
  	if (bio_integrity(bio))
  		bio_integrity_trim(bio);

  }
  EXPORT_SYMBOL_GPL(bio_trim);
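
  /*
   * Editorial example (a sketch, not part of the kernel tree): carving a
   * sub-range out of a clone, as a stacking driver might when remapping only
   * part of an I/O; "offset" and "nr_sectors" are caller-supplied and
   * relative to the cloned bio:
   *
   *	clone = bio_clone_fast(bio, GFP_NOIO, bs);
   *	bio_trim(clone, offset, nr_sectors);
   */
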
  /*
   * create memory pools for biovec's in a bio_set.
   * use the global biovec slabs created for general use.
   */
  int biovec_init_pool(mempool_t *pool, int pool_entries)
  {
  	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

  	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
  }
  /*
   * bioset_exit - exit a bioset initialized with bioset_init()
   *
   * May be called on a zeroed but uninitialized bioset (i.e. allocated with
   * kzalloc()).
   */
  void bioset_exit(struct bio_set *bs)
  {
  	if (bs->rescue_workqueue)
  		destroy_workqueue(bs->rescue_workqueue);
  	bs->rescue_workqueue = NULL;

  	mempool_exit(&bs->bio_pool);
  	mempool_exit(&bs->bvec_pool);

  	bioset_integrity_free(bs);
  	if (bs->bio_slab)
  		bio_put_slab(bs);
  	bs->bio_slab = NULL;
  }
  EXPORT_SYMBOL(bioset_exit);

  /**
   * bioset_init - Initialize a bio_set
   * @bs:		pool to initialize
   * @pool_size:	Number of bio and bio_vecs to cache in the mempool
   * @front_pad:	Number of bytes to allocate in front of the returned bio
   * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
   *              and %BIOSET_NEED_RESCUER
   *
   * Description:
   *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
   *    to ask for a number of bytes to be allocated in front of the bio.
   *    Front pad allocation is useful for embedding the bio inside
   *    another structure, to avoid allocating extra data to go with the bio.
   *    Note that the bio must be embedded at the END of that structure always,
   *    or things will break badly.
   *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
   *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
   *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
   *    dispatch queued requests when the mempool runs out of space.
   *
   */
  int bioset_init(struct bio_set *bs,
  		unsigned int pool_size,
  		unsigned int front_pad,
  		int flags)
  {
  	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
  
  	bs->front_pad = front_pad;
  
  	spin_lock_init(&bs->rescue_lock);
  	bio_list_init(&bs->rescue_list);
  	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
  
  	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
  	if (!bs->bio_slab)
  		return -ENOMEM;
  
  	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
  		goto bad;
  
  	if ((flags & BIOSET_NEED_BVECS) &&
  	    biovec_init_pool(&bs->bvec_pool, pool_size))
  		goto bad;
  
  	if (!(flags & BIOSET_NEED_RESCUER))
  		return 0;
  
  	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
  	if (!bs->rescue_workqueue)
  		goto bad;
  
  	return 0;
  bad:
  	bioset_exit(bs);
  	return -ENOMEM;
  }
  EXPORT_SYMBOL(bioset_init);
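
  /*
   * Editorial example (a sketch, not part of the kernel tree): a driver
   * embedding the bio at the end of its per-I/O structure, using front_pad so
   * that bio_alloc_bioset() hands back memory it can container_of() its way
   * out of; "struct my_io" and "my_bioset" are hypothetical names:
   *
   *	struct my_io {
   *		...			// driver state
   *		struct bio bio;		// must remain the last member
   *	};
   *	static struct bio_set my_bioset;
   *
   *	ret = bioset_init(&my_bioset, BIO_POOL_SIZE,
   *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
   *	...
   *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bioset);
   *	io = container_of(bio, struct my_io, bio);
   */
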
  /*
   * Initialize and setup a new bio_set, based on the settings from
   * another bio_set.
   */
  int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
  {
  	int flags;
  
  	flags = 0;
  	if (src->bvec_pool.min_nr)
  		flags |= BIOSET_NEED_BVECS;
  	if (src->rescue_workqueue)
  		flags |= BIOSET_NEED_RESCUER;
  
  	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
  }
  EXPORT_SYMBOL(bioset_init_from_src);
  static void __init biovec_init_slabs(void)
  {
  	int i;
  	for (i = 0; i < BVEC_POOL_NR; i++) {
  		int size;
  		struct biovec_slab *bvs = bvec_slabs + i;
  		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
  			bvs->slab = NULL;
  			continue;
  		}

  		size = bvs->nr_vecs * sizeof(struct bio_vec);
  		bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
  	}
  }
  
  static int __init init_bio(void)
  {
  	bio_slab_max = 2;
  	bio_slab_nr = 0;
  	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
  			    GFP_KERNEL);
  
  	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
  	if (!bio_slabs)
  		panic("bio: can't allocate bios\n");

  	bio_integrity_init();
  	biovec_init_slabs();
  	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
  		panic("bio: can't allocate bios\n");
  	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
  		panic("bio: can't create integrity pool\n");
  	return 0;
  }
  subsys_initcall(init_bio);