block/bio.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   */
  #include <linux/mm.h>
  #include <linux/swap.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/uio.h>
  #include <linux/iocontext.h>
  #include <linux/slab.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/mempool.h>
  #include <linux/workqueue.h>
  #include <linux/cgroup.h>
  #include <linux/blk-cgroup.h>
  #include <linux/highmem.h>

  #include <trace/events/block.h>
  #include "blk.h"
  #include "blk-rq-qos.h"

  /*
   * Test patch to inline a certain number of bi_io_vec's inside the bio
   * itself, to shrink a bio data allocation from two mempool calls to one
   */
  #define BIO_INLINE_VECS		4
  /*
   * if you change this list, also change bvec_alloc or things will
   * break badly! cannot be bigger than what you can fit into an
   * unsigned short
   */
  #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
  };
  #undef BV
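
/*
 * Editorial note (not in the original file): bvec_alloc() below rounds a
 * requested vector count up to the nearest slab in this list, so for example
 * nr = 1 is served from "biovec-1", nr = 10 from "biovec-16", and
 * nr = 129..BIO_MAX_PAGES from the mempool-backed "biovec-max" entry.
 */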
  
  /*
   * fs_bio_set is the bio_set containing bio and iovec memory pools used by
   * IO code that does not need private memory pools.
   */
  struct bio_set fs_bio_set;
  EXPORT_SYMBOL(fs_bio_set);

  /*
   * Our slab pool management
   */
  struct bio_slab {
  	struct kmem_cache *slab;
  	unsigned int slab_ref;
  	unsigned int slab_size;
  	char name[8];
  };
  static DEFINE_MUTEX(bio_slab_lock);
  static struct bio_slab *bio_slabs;
  static unsigned int bio_slab_nr, bio_slab_max;
  
  static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  {
  	unsigned int sz = sizeof(struct bio) + extra_size;
  	struct kmem_cache *slab = NULL;
  	struct bio_slab *bslab, *new_bio_slabs;
  	unsigned int new_bio_slab_max;
  	unsigned int i, entry = -1;
  
  	mutex_lock(&bio_slab_lock);
  
  	i = 0;
  	while (i < bio_slab_nr) {
  		bslab = &bio_slabs[i];
  
  		if (!bslab->slab && entry == -1)
  			entry = i;
  		else if (bslab->slab_size == sz) {
  			slab = bslab->slab;
  			bslab->slab_ref++;
  			break;
  		}
  		i++;
  	}
  
  	if (slab)
  		goto out_unlock;
  
  	if (bio_slab_nr == bio_slab_max && entry == -1) {
  		new_bio_slab_max = bio_slab_max << 1;
  		new_bio_slabs = krealloc(bio_slabs,
  					 new_bio_slab_max * sizeof(struct bio_slab),
  					 GFP_KERNEL);
  		if (!new_bio_slabs)
  			goto out_unlock;
  		bio_slab_max = new_bio_slab_max;
  		bio_slabs = new_bio_slabs;
  	}
  	if (entry == -1)
  		entry = bio_slab_nr++;
  
  	bslab = &bio_slabs[entry];
  
  	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
  	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
  				 SLAB_HWCACHE_ALIGN, NULL);
  	if (!slab)
  		goto out_unlock;
  	bslab->slab = slab;
  	bslab->slab_ref = 1;
  	bslab->slab_size = sz;
  out_unlock:
  	mutex_unlock(&bio_slab_lock);
  	return slab;
  }
  
  static void bio_put_slab(struct bio_set *bs)
  {
  	struct bio_slab *bslab = NULL;
  	unsigned int i;
  
  	mutex_lock(&bio_slab_lock);
  
  	for (i = 0; i < bio_slab_nr; i++) {
  		if (bs->bio_slab == bio_slabs[i].slab) {
  			bslab = &bio_slabs[i];
  			break;
  		}
  	}
  
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
  		goto out;
  
  	WARN_ON(!bslab->slab_ref);
  
  	if (--bslab->slab_ref)
  		goto out;
  
  	kmem_cache_destroy(bslab->slab);
  	bslab->slab = NULL;
  
  out:
  	mutex_unlock(&bio_slab_lock);
  }
  unsigned int bvec_nr_vecs(unsigned short idx)
  {
  	return bvec_slabs[--idx].nr_vecs;
  }
  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
  {
  	if (!idx)
  		return;
  	idx--;
  
  	BIO_BUG_ON(idx >= BVEC_POOL_NR);

  	if (idx == BVEC_POOL_MAX) {
  		mempool_free(bv, pool);
  	} else {
  		struct biovec_slab *bvs = bvec_slabs + idx;
  
  		kmem_cache_free(bvs->slab, bv);
  	}
  }
  struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
  			   mempool_t *pool)
  {
  	struct bio_vec *bvl;
  
  	/*
  	 * see comment near bvec_array define!
  	 */
  	switch (nr) {
  	case 1:
  		*idx = 0;
  		break;
  	case 2 ... 4:
  		*idx = 1;
  		break;
  	case 5 ... 16:
  		*idx = 2;
  		break;
  	case 17 ... 64:
  		*idx = 3;
  		break;
  	case 65 ... 128:
  		*idx = 4;
  		break;
  	case 129 ... BIO_MAX_PAGES:
  		*idx = 5;
  		break;
  	default:
  		return NULL;
  	}
  
  	/*
  	 * idx now points to the pool we want to allocate from. only the
  	 * 1-vec entry pool is mempool backed.
  	 */
  	if (*idx == BVEC_POOL_MAX) {
  fallback:
  		bvl = mempool_alloc(pool, gfp_mask);
  	} else {
  		struct biovec_slab *bvs = bvec_slabs + *idx;
  		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

  		/*
  		 * Make this allocation restricted and don't dump info on
  		 * allocation failures, since we'll fallback to the mempool
  		 * in case of failure.
  		 */
  		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

  		/*
  		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
  		 * is set, retry with the 1-entry mempool
  		 */
  		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
  		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
  			*idx = BVEC_POOL_MAX;
  			goto fallback;
  		}
  	}
  	(*idx)++;
  	return bvl;
  }
  void bio_uninit(struct bio *bio)
  {
  	bio_disassociate_blkg(bio);
  
  	if (bio_integrity(bio))
  		bio_integrity_free(bio);
  }
  EXPORT_SYMBOL(bio_uninit);

  static void bio_free(struct bio *bio)
  {
  	struct bio_set *bs = bio->bi_pool;
  	void *p;
  	bio_uninit(bio);
  
  	if (bs) {
  		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
  
  		/*
  		 * If we have front padding, adjust the bio pointer before freeing
  		 */
  		p = bio;
  		p -= bs->front_pad;
  		mempool_free(p, &bs->bio_pool);
  	} else {
  		/* Bio was allocated by bio_kmalloc() */
  		kfree(bio);
  	}
  }
  /*
   * Users of this function have their own bio allocation. Subsequently,
   * they must remember to pair any call to bio_init() with bio_uninit()
   * when IO has completed, or when the bio is released.
   */
  void bio_init(struct bio *bio, struct bio_vec *table,
  	      unsigned short max_vecs)
  {
  	memset(bio, 0, sizeof(*bio));
  	atomic_set(&bio->__bi_remaining, 1);
  	atomic_set(&bio->__bi_cnt, 1);
  
  	bio->bi_io_vec = table;
  	bio->bi_max_vecs = max_vecs;
  }
  EXPORT_SYMBOL(bio_init);
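
/*
 * Editorial usage sketch (not part of the original file): a driver that
 * embeds a bio and its bio_vec table in its own structure initialises them
 * with bio_init() and pairs that with bio_uninit() once the I/O is done.
 * The struct and function names below are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
struct my_request {
	struct bio	bio;
	struct bio_vec	inline_vecs[4];
};

static void my_request_init(struct my_request *req)
{
	/* req->bio now uses req->inline_vecs as its (empty) bvec table */
	bio_init(&req->bio, req->inline_vecs, ARRAY_SIZE(req->inline_vecs));
}

static void my_request_done(struct my_request *req)
{
	/* drop integrity/cgroup state acquired during the bio's lifetime */
	bio_uninit(&req->bio);
}
#endif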
  
  /**
   * bio_reset - reinitialize a bio
   * @bio:	bio to reset
   *
   * Description:
   *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
   *   preserved are the ones that are initialized by bio_alloc_bioset(). See
   *   comment in struct bio.
   */
  void bio_reset(struct bio *bio)
  {
  	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
  	bio_uninit(bio);
  
  	memset(bio, 0, BIO_RESET_BYTES);
  	bio->bi_flags = flags;
  	atomic_set(&bio->__bi_remaining, 1);
  }
  EXPORT_SYMBOL(bio_reset);
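
/*
 * Editorial usage sketch (not part of the original file): bio_reset() lets a
 * caller reuse one bio for several submissions instead of freeing and
 * reallocating it; only the fields initialised by bio_alloc_bioset() are
 * preserved.  The function name below is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void resubmit_same_bio(struct bio *bio, struct block_device *bdev,
			      struct page *page, sector_t sector)
{
	bio_reset(bio);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}
#endif
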
  static struct bio *__bio_chain_endio(struct bio *bio)
  {
  	struct bio *parent = bio->bi_private;
  	if (!parent->bi_status)
  		parent->bi_status = bio->bi_status;
  	bio_put(bio);
  	return parent;
  }
  
  static void bio_chain_endio(struct bio *bio)
  {
  	bio_endio(__bio_chain_endio(bio));
  }
  
  /**
   * bio_chain - chain bio completions
   * @bio: the target bio
   * @parent: the @bio's parent bio
   *
   * The caller won't have a bi_end_io called when @bio completes - instead,
   * @parent's bi_end_io won't be called until both @parent and @bio have
   * completed; the chained bio will also be freed when it completes.
   *
   * The caller must not set bi_private or bi_end_io in @bio.
   */
  void bio_chain(struct bio *bio, struct bio *parent)
  {
  	BUG_ON(bio->bi_private || bio->bi_end_io);
  
  	bio->bi_private = parent;
  	bio->bi_end_io	= bio_chain_endio;
  	bio_inc_remaining(parent);
  }
  EXPORT_SYMBOL(bio_chain);
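
/*
 * Editorial usage sketch (not part of the original file): the typical caller
 * carves part of the work off into a child bio, chains it so the parent's
 * ->bi_end_io only runs once both have completed, and submits the child.
 * The function name below is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void submit_partial_as_child(struct bio *parent, struct bio_set *bs)
{
	struct bio *child = bio_clone_fast(parent, GFP_NOIO, bs);

	/* trimming @child to the desired sub-range is omitted here */
	bio_chain(child, parent);
	generic_make_request(child);
}
#endif
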
  static void bio_alloc_rescue(struct work_struct *work)
  {
  	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
  	struct bio *bio;
  
  	while (1) {
  		spin_lock(&bs->rescue_lock);
  		bio = bio_list_pop(&bs->rescue_list);
  		spin_unlock(&bs->rescue_lock);
  
  		if (!bio)
  			break;
  
  		generic_make_request(bio);
  	}
  }
  
  static void punt_bios_to_rescuer(struct bio_set *bs)
  {
  	struct bio_list punt, nopunt;
  	struct bio *bio;
  	if (WARN_ON_ONCE(!bs->rescue_workqueue))
  		return;
  	/*
  	 * In order to guarantee forward progress we must punt only bios that
  	 * were allocated from this bio_set; otherwise, if there was a bio on
  	 * there for a stacking driver higher up in the stack, processing it
  	 * could require allocating bios from this bio_set, and doing that from
  	 * our own rescuer would be bad.
  	 *
  	 * Since bio lists are singly linked, pop them all instead of trying to
  	 * remove from the middle of the list:
  	 */
  
  	bio_list_init(&punt);
  	bio_list_init(&nopunt);
  	while ((bio = bio_list_pop(&current->bio_list[0])))
  		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
  	current->bio_list[0] = nopunt;

  	bio_list_init(&nopunt);
  	while ((bio = bio_list_pop(&current->bio_list[1])))
  		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
  	current->bio_list[1] = nopunt;
  
  	spin_lock(&bs->rescue_lock);
  	bio_list_merge(&bs->rescue_list, &punt);
  	spin_unlock(&bs->rescue_lock);
  
  	queue_work(bs->rescue_workqueue, &bs->rescue_work);
  }
  /**
   * bio_alloc_bioset - allocate a bio for I/O
   * @gfp_mask:   the GFP_* mask given to the slab allocator
   * @nr_iovecs:	number of iovecs to pre-allocate
   * @bs:		the bio_set to allocate from.
   *
   * Description:
   *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
   *   backed by the @bs's mempool.
   *
   *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
   *   always be able to allocate a bio. This is due to the mempool guarantees.
   *   To make this work, callers must never allocate more than 1 bio at a time
   *   from this pool. Callers that need to allocate more than 1 bio must always
   *   submit the previously allocated bio for IO before attempting to allocate
   *   a new one. Failure to do so can cause deadlocks under memory pressure.
   *
   *   Note that when running under generic_make_request() (i.e. any block
   *   driver), bios are not submitted until after you return - see the code in
   *   generic_make_request() that converts recursion into iteration, to prevent
   *   stack overflows.
   *
   *   This would normally mean allocating multiple bios under
   *   generic_make_request() would be susceptible to deadlocks, but we have
   *   deadlock avoidance code that resubmits any blocked bios from a rescuer
   *   thread.
   *
   *   However, we do not guarantee forward progress for allocations from other
   *   mempools. Doing multiple allocations from the same mempool under
   *   generic_make_request() should be avoided - instead, use bio_set's front_pad
   *   for per bio allocations.
   *
   *   RETURNS:
   *   Pointer to new bio on success, NULL on failure.
   */
  struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
  			     struct bio_set *bs)
  {
  	gfp_t saved_gfp = gfp_mask;
  	unsigned front_pad;
  	unsigned inline_vecs;
  	struct bio_vec *bvl = NULL;
  	struct bio *bio;
  	void *p;
  	if (!bs) {
  		if (nr_iovecs > UIO_MAXIOV)
  			return NULL;
  
  		p = kmalloc(sizeof(struct bio) +
  			    nr_iovecs * sizeof(struct bio_vec),
  			    gfp_mask);
  		front_pad = 0;
  		inline_vecs = nr_iovecs;
  	} else {
  		/* should not use nobvec bioset for nr_iovecs > 0 */
  		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
  				 nr_iovecs > 0))
  			return NULL;
  		/*
  		 * generic_make_request() converts recursion to iteration; this
  		 * means if we're running beneath it, any bios we allocate and
  		 * submit will not be submitted (and thus freed) until after we
  		 * return.
  		 *
  		 * This exposes us to a potential deadlock if we allocate
  		 * multiple bios from the same bio_set() while running
  		 * underneath generic_make_request(). If we were to allocate
  		 * multiple bios (say a stacking block driver that was splitting
  		 * bios), we would deadlock if we exhausted the mempool's
  		 * reserve.
  		 *
  		 * We solve this, and guarantee forward progress, with a rescuer
  		 * workqueue per bio_set. If we go to allocate and there are
  		 * bios on current->bio_list, we first try the allocation
  		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
  		 * bios we would be blocking to the rescuer workqueue before
  		 * we retry with the original gfp_flags.
  		 */
  		if (current->bio_list &&
  		    (!bio_list_empty(&current->bio_list[0]) ||
  		     !bio_list_empty(&current->bio_list[1])) &&
  		    bs->rescue_workqueue)
  			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

  		p = mempool_alloc(&bs->bio_pool, gfp_mask);
  		if (!p && gfp_mask != saved_gfp) {
  			punt_bios_to_rescuer(bs);
  			gfp_mask = saved_gfp;
  			p = mempool_alloc(&bs->bio_pool, gfp_mask);
  		}
  		front_pad = bs->front_pad;
  		inline_vecs = BIO_INLINE_VECS;
  	}
  	if (unlikely(!p))
  		return NULL;

  	bio = p + front_pad;
  	bio_init(bio, NULL, 0);

  	if (nr_iovecs > inline_vecs) {
  		unsigned long idx = 0;
  		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
  		if (!bvl && gfp_mask != saved_gfp) {
  			punt_bios_to_rescuer(bs);
  			gfp_mask = saved_gfp;
  			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
  		}
  		if (unlikely(!bvl))
  			goto err_free;

  		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
  	} else if (nr_iovecs) {
  		bvl = bio->bi_inline_vecs;
  	}
  
  	bio->bi_pool = bs;
  	bio->bi_max_vecs = nr_iovecs;
  	bio->bi_io_vec = bvl;
  	return bio;
  
  err_free:
  	mempool_free(p, &bs->bio_pool);
  	return NULL;
  }
  EXPORT_SYMBOL(bio_alloc_bioset);
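
/*
 * Editorial usage sketch (not part of the original file): with
 * __GFP_DIRECT_RECLAIM set, allocation from a bio_set cannot fail, but as
 * the description above says the caller must submit each bio before
 * allocating the next one from the same pool.  The names below are
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void write_pages_one_bio_at_a_time(struct block_device *bdev,
					  struct bio_set *bs,
					  struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_iter.bi_sector = i * (PAGE_SIZE >> 9);
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		/* submit before the next allocation to honour the mempool rule */
		submit_bio(bio);
	}
}
#endif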

  void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
  {
  	unsigned long flags;
  	struct bio_vec bv;
  	struct bvec_iter iter;

  	__bio_for_each_segment(bv, bio, iter, start) {
  		char *data = bvec_kmap_irq(&bv, &flags);
  		memset(data, 0, bv.bv_len);
  		flush_dcache_page(bv.bv_page);
  		bvec_kunmap_irq(data, &flags);
  	}
  }
  EXPORT_SYMBOL(zero_fill_bio_iter);

  /**
 * bio_truncate - truncate the bio down to @new_size
   * @bio:	the bio to be truncated
   * @new_size:	new size for truncating the bio
   *
   * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
   *   REQ_OP_READ, zero the truncated part. This function should only
   *   be used for handling corner cases, such as bio eod.
   */
  void bio_truncate(struct bio *bio, unsigned new_size)
  {
  	struct bio_vec bv;
  	struct bvec_iter iter;
  	unsigned int done = 0;
  	bool truncated = false;
  
  	if (new_size >= bio->bi_iter.bi_size)
  		return;
  	if (bio_op(bio) != REQ_OP_READ)
  		goto exit;
  
  	bio_for_each_segment(bv, bio, iter) {
  		if (done + bv.bv_len > new_size) {
  			unsigned offset;
  
  			if (!truncated)
  				offset = new_size - done;
  			else
  				offset = 0;
  			zero_user(bv.bv_page, offset, bv.bv_len - offset);
  			truncated = true;
  		}
  		done += bv.bv_len;
  	}
  
   exit:
  	/*
  	 * Don't touch bvec table here and make it really immutable, since
  	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .bi_end_io() callback.
  	 *
  	 * It is enough to truncate bio by updating .bi_size since we can make
  	 * correct bvec with the updated .bi_size for drivers.
  	 */
  	bio->bi_iter.bi_size = new_size;
  }
  /**
   * bio_put - release a reference to a bio
   * @bio:   bio to release reference to
   *
   * Description:
   *   Put a reference to a &struct bio, either one you have gotten with
   *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
   **/
  void bio_put(struct bio *bio)
  {
  	if (!bio_flagged(bio, BIO_REFFED))
  		bio_free(bio);
  	else {
  		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
  
  		/*
  		 * last put frees it
  		 */
  		if (atomic_dec_and_test(&bio->__bi_cnt))
  			bio_free(bio);
  	}
  }
  EXPORT_SYMBOL(bio_put);

  /**
   * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
   * 	@bio: destination bio
   * 	@bio_src: bio to clone
   *
   *	Clone a &bio. Caller will own the returned bio, but not
   *	the actual data it points to. Reference count of returned
   * 	bio will be one.
   *
   * 	Caller must ensure that @bio_src is not freed before @bio.
   */
  void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
  {
  	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
  
  	/*
  	 * most users will be overriding ->bi_disk with a new target,
  	 * so we don't set nor calculate new physical/hw segment counts here
  	 */
  	bio->bi_disk = bio_src->bi_disk;
  	bio->bi_partno = bio_src->bi_partno;
  	bio_set_flag(bio, BIO_CLONED);
  	if (bio_flagged(bio_src, BIO_THROTTLED))
  		bio_set_flag(bio, BIO_THROTTLED);
  	bio->bi_opf = bio_src->bi_opf;
  	bio->bi_ioprio = bio_src->bi_ioprio;
  	bio->bi_write_hint = bio_src->bi_write_hint;
  	bio->bi_iter = bio_src->bi_iter;
  	bio->bi_io_vec = bio_src->bi_io_vec;

  	bio_clone_blkg_association(bio, bio_src);
  	blkcg_bio_issue_init(bio);
  }
  EXPORT_SYMBOL(__bio_clone_fast);
  
  /**
   *	bio_clone_fast - clone a bio that shares the original bio's biovec
   *	@bio: bio to clone
   *	@gfp_mask: allocation priority
   *	@bs: bio_set to allocate from
   *
   * 	Like __bio_clone_fast, only also allocates the returned bio
   */
  struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
  {
  	struct bio *b;
  
  	b = bio_alloc_bioset(gfp_mask, 0, bs);
  	if (!b)
  		return NULL;
  
  	__bio_clone_fast(b, bio);
  
  	if (bio_integrity(bio)) {
  		int ret;
  
  		ret = bio_integrity_clone(b, bio, gfp_mask);
  
  		if (ret < 0) {
  			bio_put(b);
  			return NULL;
  		}
  	}
  
  	return b;
  }
  EXPORT_SYMBOL(bio_clone_fast);
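
/*
 * Editorial usage sketch (not part of the original file): a stacking driver
 * typically clones an incoming bio, points the clone at the underlying
 * device and its own completion handler, and submits it.  The names below
 * are hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void remap_and_submit(struct bio *bio, struct block_device *lower,
			     struct bio_set *bs, bio_end_io_t *done)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	bio_set_dev(clone, lower);	/* the clone shares @bio's bvec table */
	clone->bi_end_io = done;
	clone->bi_private = bio;
	generic_make_request(clone);
}
#endif
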
  static inline bool page_is_mergeable(const struct bio_vec *bv,
  		struct page *page, unsigned int len, unsigned int off,
  		bool *same_page)
  {
  	size_t bv_end = bv->bv_offset + bv->bv_len;
  	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
  	phys_addr_t page_addr = page_to_phys(page);
  
  	if (vec_end_addr + 1 != page_addr + off)
  		return false;
  	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
  		return false;

  	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
  	if (*same_page)
  		return true;
  	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
  }
  static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
  		struct page *page, unsigned len, unsigned offset,
  		bool *same_page)
  {
  	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
  	unsigned long mask = queue_segment_boundary(q);
  	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
  	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
  
  	if ((addr1 | mask) != (addr2 | mask))
  		return false;
  	if (bv->bv_len + len > queue_max_segment_size(q))
  		return false;
  	return __bio_try_merge_page(bio, page, len, offset, same_page);
  }
  /**
   *	__bio_add_pc_page	- attempt to add page to passthrough bio
   *	@q: the target queue
   *	@bio: destination bio
   *	@page: page to add
   *	@len: vec entry length
   *	@offset: vec entry offset
   *	@same_page: return if the merge happen inside the same page
   *
   *	Attempt to add a page to the bio_vec maplist. This can fail for a
   *	number of reasons, such as the bio being full or target block device
   *	limitations. The target block device must allow bio's up to PAGE_SIZE,
   *	so it is always possible to add a single page to an empty bio.
   *
   *	This should only be used by passthrough bios.
   */
  static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
  		struct page *page, unsigned int len, unsigned int offset,
  		bool *same_page)
  {
  	struct bio_vec *bvec;
  
  	/*
  	 * cloned bio must not modify vec list
  	 */
  	if (unlikely(bio_flagged(bio, BIO_CLONED)))
  		return 0;
  	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
  		return 0;
  	if (bio->bi_vcnt > 0) {
  		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
  			return len;
  
  		/*
  		 * If the queue doesn't support SG gaps and adding this segment
  		 * would create a gap, disallow it.
  		 */
  		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
  		if (bvec_gap_to_prev(q, bvec, offset))
  			return 0;
  	}
  	if (bio_full(bio, len))
  		return 0;
  	if (bio->bi_vcnt >= queue_max_segments(q))
  		return 0;
  	bvec = &bio->bi_io_vec[bio->bi_vcnt];
  	bvec->bv_page = page;
  	bvec->bv_len = len;
  	bvec->bv_offset = offset;
  	bio->bi_vcnt++;
  	bio->bi_iter.bi_size += len;
  	return len;
  }
  
  int bio_add_pc_page(struct request_queue *q, struct bio *bio,
  		struct page *page, unsigned int len, unsigned int offset)
  {
  	bool same_page = false;
  	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
  }
  EXPORT_SYMBOL(bio_add_pc_page);
  
  /**
   * __bio_try_merge_page - try appending data to an existing bvec.
   * @bio: destination bio
   * @page: start page to add
   * @len: length of the data to add
   * @off: offset of the data relative to @page
   * @same_page: return if the segment has been merged inside the same page
   *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
   * page size.
   *
   * Warn if (@len, @off) crosses pages in case that @same_page is true.
   *
   * Return %true on success or %false on failure.
   */
  bool __bio_try_merge_page(struct bio *bio, struct page *page,
  		unsigned int len, unsigned int off, bool *same_page)
  {
  	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
  		return false;

  	if (bio->bi_vcnt > 0) {
  		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
  
  		if (page_is_mergeable(bv, page, len, off, same_page)) {
  			if (bio->bi_iter.bi_size > UINT_MAX - len) {
  				*same_page = false;
  				return false;
  			}
  			bv->bv_len += len;
  			bio->bi_iter.bi_size += len;
  			return true;
  		}
  	}
  	return false;
  }
  EXPORT_SYMBOL_GPL(__bio_try_merge_page);

  /**
   * __bio_add_page - add page(s) to a bio in a new segment
   * @bio: destination bio
   * @page: start page to add
   * @len: length of the data to add, may cross pages
   * @off: offset of the data relative to @page, may cross pages
   *
   * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
   * that @bio has space for another bvec.
   */
  void __bio_add_page(struct bio *bio, struct page *page,
  		unsigned int len, unsigned int off)
  {
  	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

  	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
  	WARN_ON_ONCE(bio_full(bio, len));
  
  	bv->bv_page = page;
  	bv->bv_offset = off;
  	bv->bv_len = len;

  	bio->bi_iter.bi_size += len;
  	bio->bi_vcnt++;
  
  	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
  		bio_set_flag(bio, BIO_WORKINGSET);
  }
  EXPORT_SYMBOL_GPL(__bio_add_page);
  
  /**
   *	bio_add_page	-	attempt to add page(s) to bio
   *	@bio: destination bio
   *	@page: start page to add
   *	@len: vec entry length, may cross pages
   *	@offset: vec entry offset relative to @page, may cross pages
   *
   *	Attempt to add page(s) to the bio_vec maplist. This will only fail
   *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
   */
  int bio_add_page(struct bio *bio, struct page *page,
  		 unsigned int len, unsigned int offset)
  {
  	bool same_page = false;
  
  	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  		if (bio_full(bio, len))
  			return 0;
  		__bio_add_page(bio, page, len, offset);
  	}
  	return len;
  }
  EXPORT_SYMBOL(bio_add_page);
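
/*
 * Editorial usage sketch (not part of the original file): bio_add_page()
 * returns the number of bytes added, or 0 when the bio is already full (or
 * cloned), so callers building large bios stop and submit at that point.
 * The function name below is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static unsigned int fill_bio_from_pages(struct bio *bio, struct page **pages,
					unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;	/* bio full: caller submits and starts a new one */
	return i;	/* number of pages actually added */
}
#endif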

  void bio_release_pages(struct bio *bio, bool mark_dirty)
  {
  	struct bvec_iter_all iter_all;
  	struct bio_vec *bvec;

  	if (bio_flagged(bio, BIO_NO_PAGE_REF))
  		return;
  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		if (mark_dirty && !PageCompound(bvec->bv_page))
  			set_page_dirty_lock(bvec->bv_page);
  		put_page(bvec->bv_page);
  	}
  }
  static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
  {
  	const struct bio_vec *bv = iter->bvec;
  	unsigned int len;
  	size_t size;
  
  	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
  		return -EINVAL;
  
  	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
  	size = bio_add_page(bio, bv->bv_page, len,
  				bv->bv_offset + iter->iov_offset);
  	if (unlikely(size != len))
  		return -EINVAL;
  	iov_iter_advance(iter, size);
  	return 0;
  }
  #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
  /**
   * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
   * @bio: bio to add pages to
   * @iter: iov iterator describing the region to be mapped
   *
   * Pins pages from *iter and appends them to @bio's bvec array. The
   * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
   */
  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  {
  	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  	struct page **pages = (struct page **)bv;
  	bool same_page = false;
  	ssize_t size, left;
  	unsigned len, i;
  	size_t offset;
  
  	/*
  	 * Move page array up in the allocated memory for the bio vecs as far as
  	 * possible so that we can start filling biovecs from the beginning
  	 * without overwriting the temporary page array.
  	*/
  	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
  	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
  
  	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
  	if (unlikely(size <= 0))
  		return size ? size : -EFAULT;

  	for (left = size, i = 0; left > 0; left -= len, i++) {
  		struct page *page = pages[i];

  		len = min_t(size_t, PAGE_SIZE - offset, left);
  
  		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  			if (same_page)
  				put_page(page);
  		} else {
  			if (WARN_ON_ONCE(bio_full(bio, len)))
                                  return -EINVAL;
  			__bio_add_page(bio, page, len, offset);
  		}
  		offset = 0;
  	}
  	iov_iter_advance(iter, size);
  	return 0;
  }
  
  /**
   * bio_iov_iter_get_pages - add user or kernel pages to a bio
   * @bio: bio to add pages to
   * @iter: iov iterator describing the region to be added
   *
   * This takes either an iterator pointing to user memory, or one pointing to
   * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
   * map them into the kernel. On IO completion, the caller should put those
   * pages. If we're adding kernel pages, and the caller told us it's safe to
   * do so, we just have to add the pages to the bio directly. We don't grab an
   * extra reference to those pages (the user should already have that), and we
   * don't put the page on IO completion. The caller needs to check if the bio is
   * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
   * released.
   *
   * The function tries, but does not guarantee, to pin as many pages as
   * fit into the bio, or are requested in *iter, whatever is smaller. If
   * MM encounters an error pinning the requested pages, it stops. Error
   * is returned only if 0 pages could be pinned.
   */
  int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  {
  	const bool is_bvec = iov_iter_is_bvec(iter);
  	int ret;
  
  	if (WARN_ON_ONCE(bio->bi_vcnt))
  		return -EINVAL;
  
  	do {
  		if (is_bvec)
  			ret = __bio_iov_bvec_add_pages(bio, iter);
  		else
  			ret = __bio_iov_iter_get_pages(bio, iter);
  	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

  	if (is_bvec)
  		bio_set_flag(bio, BIO_NO_PAGE_REF);
  	return bio->bi_vcnt ? 0 : ret;
  }
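
/*
 * Editorial usage sketch (not part of the original file): direct-I/O style
 * callers feed an iov_iter into freshly allocated bios and loop until the
 * iterator is drained, one bio per pass; the completion handler is expected
 * to release the pinned pages (omitted here).  The names below are
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static int submit_iter_as_bios(struct block_device *bdev, sector_t sector,
			       struct iov_iter *iter, struct bio_set *bs)
{
	while (iov_iter_count(iter)) {
		struct bio *bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, bs);
		int ret = bio_iov_iter_get_pages(bio, iter);

		if (ret) {
			bio_put(bio);
			return ret;
		}
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_iter.bi_sector = sector;
		sector += bio_sectors(bio);
		submit_bio(bio);
	}
	return 0;
}
#endif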

  static void submit_bio_wait_endio(struct bio *bio)
  {
  	complete(bio->bi_private);
  }
  
  /**
   * submit_bio_wait - submit a bio, and wait until it completes
   * @bio: The &struct bio which describes the I/O
   *
   * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
   * bio_endio() on failure.
   *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference; the caller must drop the reference
 * on their own.
   */
  int submit_bio_wait(struct bio *bio)
  {
  	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

  	bio->bi_private = &done;
  	bio->bi_end_io = submit_bio_wait_endio;
  	bio->bi_opf |= REQ_SYNC;
  	submit_bio(bio);
  	wait_for_completion_io(&done);

  	return blk_status_to_errno(bio->bi_status);
  }
  EXPORT_SYMBOL(submit_bio_wait);
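
/*
 * Editorial usage sketch (not part of the original file): submit_bio_wait()
 * is the synchronous path - build the bio, wait for it, then put the
 * reference yourself, as the WARNING above notes.  The function name below
 * is hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static int read_one_page_sync(struct block_device *bdev, sector_t sector,
			      struct page *page)
{
	struct bio *bio = bio_alloc_bioset(GFP_KERNEL, 1, &fs_bio_set);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);	/* submit_bio_wait() does not drop the reference */
	return ret;
}
#endif
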
  /**
   * bio_advance - increment/complete a bio by some number of bytes
   * @bio:	bio to advance
   * @bytes:	number of bytes to complete
   *
   * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
   * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
   * be updated on the last bvec as well.
   *
   * @bio will then represent the remaining, uncompleted portion of the io.
   */
  void bio_advance(struct bio *bio, unsigned bytes)
  {
  	if (bio_integrity(bio))
  		bio_integrity_advance(bio, bytes);
  	bio_advance_iter(bio, &bio->bi_iter, bytes);
  }
  EXPORT_SYMBOL(bio_advance);
  void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
  			struct bio *src, struct bvec_iter *src_iter)
  {
  	struct bio_vec src_bv, dst_bv;
  	void *src_p, *dst_p;
  	unsigned bytes;

  	while (src_iter->bi_size && dst_iter->bi_size) {
  		src_bv = bio_iter_iovec(src, *src_iter);
  		dst_bv = bio_iter_iovec(dst, *dst_iter);
  
  		bytes = min(src_bv.bv_len, dst_bv.bv_len);

  		src_p = kmap_atomic(src_bv.bv_page);
  		dst_p = kmap_atomic(dst_bv.bv_page);

  		memcpy(dst_p + dst_bv.bv_offset,
  		       src_p + src_bv.bv_offset,
  		       bytes);
  
  		kunmap_atomic(dst_p);
  		kunmap_atomic(src_p);
  		flush_dcache_page(dst_bv.bv_page);
  		bio_advance_iter(src, src_iter, bytes);
  		bio_advance_iter(dst, dst_iter, bytes);
  	}
  }
  EXPORT_SYMBOL(bio_copy_data_iter);
  
  /**
   * bio_copy_data - copy contents of data buffers from one bio to another
   * @src: source bio
   * @dst: destination bio
   *
   * Stops when it reaches the end of either @src or @dst - that is, copies
   * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
   */
  void bio_copy_data(struct bio *dst, struct bio *src)
  {
  	struct bvec_iter src_iter = src->bi_iter;
  	struct bvec_iter dst_iter = dst->bi_iter;
  
  	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
38a72dac4   Kent Overstreet   block: Add bio_co...
1058
  }
16ac3d63e   Kent Overstreet   block: Add bio_co...
1059
  EXPORT_SYMBOL(bio_copy_data);
45db54d58   Kent Overstreet   block: Split out ...
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
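
/*
 * Illustrative sketch (editor's addition): a stacking driver that bounced a
 * READ into a private clone might copy the payload back once the clone
 * completes.  Both bios are assumed to describe at least the same number of
 * bytes; the pairing via bi_private is hypothetical:
 *
 *	static void bounce_read_done(struct bio *clone)
 *	{
 *		struct bio *orig = clone->bi_private;
 *
 *		bio_copy_data(orig, clone);	// destination first, then source
 *		bio_put(clone);
 *		bio_endio(orig);
 *	}
 */
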
  /**
   * bio_list_copy_data - copy contents of data buffers from one chain of bios to
   * another
   * @src: source bio list
   * @dst: destination bio list
   *
   * Stops when it reaches the end of either the @src list or @dst list - that is,
   * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
   * bios).
   */
  void bio_list_copy_data(struct bio *dst, struct bio *src)
  {
  	struct bvec_iter src_iter = src->bi_iter;
  	struct bvec_iter dst_iter = dst->bi_iter;
  
  	while (1) {
  		if (!src_iter.bi_size) {
  			src = src->bi_next;
  			if (!src)
  				break;
  
  			src_iter = src->bi_iter;
  		}
  
  		if (!dst_iter.bi_size) {
  			dst = dst->bi_next;
  			if (!dst)
  				break;
  
  			dst_iter = dst->bi_iter;
  		}
  
  		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  	}
  }
  EXPORT_SYMBOL(bio_list_copy_data);
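
/*
 * Illustrative sketch (editor's addition): the source and destination here are
 * chains linked through ->bi_next, so scattering one large source bio into two
 * smaller destinations could look like (all three bios are hypothetical):
 *
 *	first->bi_next = second;
 *	second->bi_next = NULL;
 *
 *	bio_list_copy_data(first, big_src);	// stops at the shorter side
 */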

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}
  
  /**
   * bio_copy_to_iter - copy all pages from bio to iov_iter
   * @bio: The &struct bio which describes the I/O as source
   * @iter: iov_iter as destination
   *
   * Copy all pages from bio to iov_iter.
   * Returns 0 on success, or error on failure.
   */
  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
  {
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
  		ssize_t ret;
  
  		ret = copy_page_to_iter(bvec->bv_page,
  					bvec->bv_offset,
  					bvec->bv_len,
  					&iter);
  
  		if (!iov_iter_count(&iter))
  			break;
  
  		if (ret < bvec->bv_len)
  			return -EFAULT;
  	}
  
  	return 0;
  }

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user_iov() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}
  
  /**
c5dec1c30   FUJITA Tomonori   block: convert bi...
1217
   *	bio_copy_user_iov	-	copy user data to bio
26e49cfc7   Kent Overstreet   block: pass iov_i...
1218
1219
1220
1221
   *	@q:		destination block queue
   *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
   *	@iter:		iovec iterator
   *	@gfp_mask:	memory allocation flags
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1222
1223
1224
1225
1226
   *
   *	Prepares and returns a bio for indirect user io, bouncing data
   *	to/from kernel pages as necessary. Must be paired with
   *	call bio_uncopy_user() on io completion.
   */
152e283fd   FUJITA Tomonori   block: introduce ...
1227
1228
  struct bio *bio_copy_user_iov(struct request_queue *q,
  			      struct rq_map_data *map_data,
e81cef5d3   Al Viro   blk_rq_map_user_i...
1229
  			      struct iov_iter *iter,
26e49cfc7   Kent Overstreet   block: pass iov_i...
1230
  			      gfp_t gfp_mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1231
  {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1232
  	struct bio_map_data *bmd;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1233
1234
  	struct page *page;
  	struct bio *bio;
d16d44ebb   Al Viro   bio_copy_user_iov...
1235
1236
  	int i = 0, ret;
  	int nr_pages;
26e49cfc7   Kent Overstreet   block: pass iov_i...
1237
  	unsigned int len = iter->count;
bd5cecea4   Geliang Tang   bio: use offset_i...
1238
  	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1239

0e5b935d4   Al Viro   bio_alloc_map_dat...
1240
  	bmd = bio_alloc_map_data(iter, gfp_mask);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1241
1242
  	if (!bmd)
  		return ERR_PTR(-ENOMEM);
26e49cfc7   Kent Overstreet   block: pass iov_i...
1243
1244
1245
1246
1247
1248
  	/*
  	 * We need to do a deep copy of the iov_iter including the iovecs.
  	 * The caller provided iov might point to an on-stack or otherwise
  	 * shortlived one.
  	 */
  	bmd->is_our_pages = map_data ? 0 : 1;
26e49cfc7   Kent Overstreet   block: pass iov_i...
1249

d16d44ebb   Al Viro   bio_copy_user_iov...
1250
1251
1252
  	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
  	if (nr_pages > BIO_MAX_PAGES)
  		nr_pages = BIO_MAX_PAGES;
26e49cfc7   Kent Overstreet   block: pass iov_i...
1253

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1254
  	ret = -ENOMEM;
a9e9dc24b   Tejun Heo   bio: use bio_kmal...
1255
  	bio = bio_kmalloc(gfp_mask, nr_pages);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1256
1257
  	if (!bio)
  		goto out_bmd;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1258
  	ret = 0;
56c451f4b   FUJITA Tomonori   [SCSI] block: fix...
1259
1260
  
  	if (map_data) {
e623ddb4e   FUJITA Tomonori   [SCSI] block: fix...
1261
  		nr_pages = 1 << map_data->page_order;
56c451f4b   FUJITA Tomonori   [SCSI] block: fix...
1262
1263
  		i = map_data->offset / PAGE_SIZE;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1264
  	while (len) {
e623ddb4e   FUJITA Tomonori   [SCSI] block: fix...
1265
  		unsigned int bytes = PAGE_SIZE;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1266

56c451f4b   FUJITA Tomonori   [SCSI] block: fix...
1267
  		bytes -= offset;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1268
1269
  		if (bytes > len)
  			bytes = len;
152e283fd   FUJITA Tomonori   block: introduce ...
1270
  		if (map_data) {
e623ddb4e   FUJITA Tomonori   [SCSI] block: fix...
1271
  			if (i == map_data->nr_entries * nr_pages) {
152e283fd   FUJITA Tomonori   block: introduce ...
1272
1273
1274
  				ret = -ENOMEM;
  				break;
  			}
e623ddb4e   FUJITA Tomonori   [SCSI] block: fix...
1275
1276
1277
1278
1279
1280
  
  			page = map_data->pages[i / nr_pages];
  			page += (i % nr_pages);
  
  			i++;
  		} else {
152e283fd   FUJITA Tomonori   block: introduce ...
1281
  			page = alloc_page(q->bounce_gfp | gfp_mask);
e623ddb4e   FUJITA Tomonori   [SCSI] block: fix...
1282
1283
1284
1285
  			if (!page) {
  				ret = -ENOMEM;
  				break;
  			}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1286
  		}
a3761c3c9   Jérôme Glisse   block: do not lea...
1287
1288
1289
  		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
  			if (!map_data)
  				__free_page(page);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1290
  			break;
a3761c3c9   Jérôme Glisse   block: do not lea...
1291
  		}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1292
1293
  
  		len -= bytes;
56c451f4b   FUJITA Tomonori   [SCSI] block: fix...
1294
  		offset = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1295
1296
1297
1298
  	}
  
  	if (ret)
  		goto cleanup;
2884d0be8   Al Viro   move more stuff d...
1299
1300
  	if (map_data)
  		map_data->offset += bio->bi_iter.bi_size;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1301
1302
1303
  	/*
  	 * success
  	 */
00e237074   David Howells   iov_iter: Use acc...
1304
  	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
ecb554a84   FUJITA Tomonori   block: fix sg SG_...
1305
  	    (map_data && map_data->from_user)) {
98a09d610   Al Viro   bio_copy_from_ite...
1306
  		ret = bio_copy_from_iter(bio, iter);
c5dec1c30   FUJITA Tomonori   block: convert bi...
1307
1308
  		if (ret)
  			goto cleanup;
98a09d610   Al Viro   bio_copy_from_ite...
1309
  	} else {
f55adad60   Keith Busch   block/bio: Do not...
1310
1311
  		if (bmd->is_our_pages)
  			zero_fill_bio(bio);
98a09d610   Al Viro   bio_copy_from_ite...
1312
  		iov_iter_advance(iter, bio->bi_iter.bi_size);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1313
  	}
26e49cfc7   Kent Overstreet   block: pass iov_i...
1314
  	bio->bi_private = bmd;
2884d0be8   Al Viro   move more stuff d...
1315
1316
  	if (map_data && map_data->null_mapped)
  		bio_set_flag(bio, BIO_NULL_MAPPED);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1317
1318
  	return bio;
  cleanup:
152e283fd   FUJITA Tomonori   block: introduce ...
1319
  	if (!map_data)
1dfa0f68c   Christoph Hellwig   block: add a help...
1320
  		bio_free_pages(bio);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1321
1322
  	bio_put(bio);
  out_bmd:
c8db44482   Kent Overstreet   block: Don't save...
1323
  	kfree(bmd);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1324
1325
  	return ERR_PTR(ret);
  }
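
/*
 * Illustrative sketch (editor's addition): bio_copy_user_iov() is normally
 * reached via blk_rq_map_user_iov(); used directly, the bounce bio it returns
 * must be finished with bio_uncopy_user() so READ data is copied back and the
 * bounce pages are freed:
 *
 *	bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	// ... submit the bio and wait for it ...
 *	ret = bio_uncopy_user(bio);
 */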

/**
 *	bio_map_user_iov - map user iovec into bio
 *	@q:		the struct request_queue for the bio
 *	@iter:		iovec iterator
 *	@gfp_mask:	memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter))
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!__bio_add_pc_page(q, bio, page, n, offs,
						&same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ERR_PTR(ret);
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 *	process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
	bio_put(bio);
}
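
/*
 * Illustrative sketch (editor's addition): a zero-copy mapping of a user
 * buffer is created with bio_map_user_iov() and torn down with
 * bio_unmap_user() once the I/O has completed:
 *
 *	bio = bio_map_user_iov(q, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	// ... submit the bio and wait for it ...
 *	bio_unmap_user(bio);	// releases the pinned pages and the bio references
 */
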
  static void bio_invalidate_vmalloc_pages(struct bio *bio)
  {
  #ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
  	if (bio->bi_private && !op_is_write(bio_op(bio))) {
  		unsigned long i, len = 0;
  
  		for (i = 0; i < bio->bi_vcnt; i++)
  			len += bio->bi_io_vec[i].bv_len;
  		invalidate_kernel_vmap_range(bio->bi_private, len);
  	}
  #endif
  }

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
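
/*
 * Illustrative sketch (editor's addition): blk_rq_map_kern() is the usual
 * consumer, but the returned bio is an ordinary bio once the device, sector
 * and operation are filled in (the names below are hypothetical):
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = first_sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	submit_bio(bio);	// completion runs bio_map_kern_endio()
 */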

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.   If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}
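
/*
 * Illustrative sketch (editor's addition): the direct-IO read pattern the
 * comments above describe is, roughly,
 *
 *	bio_set_pages_dirty(bio);	// before submission, in process context
 *	submit_bio(bio);
 *
 * and then, from the read completion handler:
 *
 *	bio_check_pages_dirty(bio);	// re-dirties and releases as needed
 *
 * After the second call the bio and its page references are owned by this
 * machinery, so the caller must not touch them again.
 */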

void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
		}
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(part, jiffies, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_inc_in_flight(q, part, op_is_write(op));

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(struct request_queue *q, int req_op,
			 struct hd_struct *part, unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long duration = now - start_time;
	const int sgrp = op_stat_group(req_op);

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_add(part, time_in_queue, duration);
	part_dec_in_flight(q, part, op_is_write(req_op));

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
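
/*
 * Illustrative sketch (editor's addition): bio-based drivers typically bracket
 * each bio with these two helpers (the disk pointer and start_time below are
 * hypothetical driver state):
 *
 *	start_time = jiffies;
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &disk->part0);
 *	// ... process the bio ...
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start_time);
 */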

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
 *   generated if BIO_TRACE_COMPLETION is set.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_disk)
		rq_qos_done_bio(bio->bi_disk->queue, bio);

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_disk->queue, bio,
					 blk_status_to_errno(bio->bi_status));
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
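
/*
 * Illustrative sketch (editor's addition): a simple bio-based driver ends each
 * bio from its completion path by setting the status and calling bio_endio();
 * the chained case described above needs no extra handling from the caller:
 *
 *	static void my_complete(struct bio *bio, blk_status_t status)
 *	{
 *		bio->bi_status = status;	// e.g. BLK_STS_OK or BLK_STS_IOERR
 *		bio_endio(bio);
 *	}
 *
 * "my_complete" is a hypothetical helper, not an existing kernel interface.
 */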

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);
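
/*
 * Illustrative sketch (editor's addition): one common arrangement splits off
 * the first chunk of an oversized bio, submits it, and continues with the
 * remainder.  The chunk size and bio set below are hypothetical:
 *
 *	struct bio *head = bio_split(bio, max_sectors, GFP_NOIO, bs);
 *
 *	if (head) {
 *		bio_chain(head, bio);	// @bio completes only after both finish
 *		submit_bio(head);
 *	}
 *	// @bio now describes only the sectors after "max_sectors"
 */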

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *              and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
 *    dispatch queued requests when the mempool runs out of space.
 *
   */
  int bioset_init(struct bio_set *bs,
  		unsigned int pool_size,
  		unsigned int front_pad,
  		int flags)
  {
  	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
  
  	bs->front_pad = front_pad;
  
  	spin_lock_init(&bs->rescue_lock);
  	bio_list_init(&bs->rescue_list);
  	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
  
  	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
  	if (!bs->bio_slab)
  		return -ENOMEM;
  
  	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
  		goto bad;
  
  	if ((flags & BIOSET_NEED_BVECS) &&
  	    biovec_init_pool(&bs->bvec_pool, pool_size))
  		goto bad;
  
  	if (!(flags & BIOSET_NEED_RESCUER))
  		return 0;
  
  	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
  	if (!bs->rescue_workqueue)
  		goto bad;
  
  	return 0;
  bad:
  	bioset_exit(bs);
  	return -ENOMEM;
  }
  EXPORT_SYMBOL(bioset_init);
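
/*
 * Illustrative sketch (editor's addition): a driver embedding its per-command
 * state around the bio sizes @front_pad accordingly and allocates from its own
 * set.  "struct my_cmd" and "my_bio_set" are hypothetical; the bio must be the
 * final member, as required above:
 *
 *	struct my_cmd {
 *		int		something;
 *		struct bio	bio;		// must be the last member
 *	};
 *
 *	ret = bioset_init(&my_bio_set, 64, offsetof(struct my_cmd, bio),
 *			  BIOSET_NEED_BVECS);
 */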
  /*
   * Initialize and setup a new bio_set, based on the settings from
   * another bio_set.
   */
  int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
  {
  	int flags;
  
  	flags = 0;
  	if (src->bvec_pool.min_nr)
  		flags |= BIOSET_NEED_BVECS;
  	if (src->rescue_workqueue)
  		flags |= BIOSET_NEED_RESCUER;
  
  	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
  }
  EXPORT_SYMBOL(bioset_init_from_src);

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_disassociate_blkg - puts back the blkg reference if associated
 * @bio: target bio
 *
 * Helper to disassociate the blkg from @bio if a blkg is associated.
 */
void bio_disassociate_blkg(struct bio *bio)
{
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
}
EXPORT_SYMBOL_GPL(bio_disassociate_blkg);

/**
 * __bio_associate_blkg - associate a bio with a blkg
 * @bio: target bio
 * @blkg: the blkg to associate
 *
 * This tries to associate @bio with the specified @blkg.  Association failure
 * is handled by walking up the blkg tree.  Therefore, the blkg associated can
 * be anything between @blkg and the root_blkg.  This situation only happens
 * when a cgroup is dying and then the remaining bios will spill to the closest
 * alive blkg.
 *
 * A reference will be taken on the @blkg and will be released when @bio is
 * freed.
 */
static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
{
	bio_disassociate_blkg(bio);
	bio->bi_blkg = blkg_tryget_closest(blkg);
}

  /**
 * bio_associate_blkg_from_css - associate a bio with a specified css
 * @bio: target bio
 * @css: target css
 *
 * Associate @bio with the blkg found by combining the css's blkg and the
 * request_queue of the @bio.  This falls back to the queue's root_blkg if
 * the association fails with the css.
 */
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css)
{
	struct request_queue *q = bio->bi_disk->queue;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	if (!css || !css->parent)
		blkg = q->root_blkg;
	else
		blkg = blkg_lookup_create(css_to_blkcg(css), q);

	__bio_associate_blkg(bio, blkg);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

#ifdef CONFIG_MEMCG
/**
 * bio_associate_blkg_from_page - associate a bio with the page's blkg
 * @bio: target bio
 * @page: the page to lookup the blkcg from
 *
 * Associate @bio with the blkg from @page's owning memcg and the respective
 * request_queue.  If cgroup_e_css returns %NULL, fall back to the queue's
 * root_blkg.
 */
void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;

	if (!page->mem_cgroup)
		return;

	rcu_read_lock();

	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
#endif /* CONFIG_MEMCG */
  /**
   * bio_associate_blkg - associate a bio with a blkg
   * @bio: target bio
   *
   * Associate @bio with the blkg found from the bio's css and request_queue.
   * If one is not found, bio_lookup_blkg() creates the blkg.  If a blkg is
   * already associated, the css is reused and association redone as the
   * request_queue may have changed.
   */
  void bio_associate_blkg(struct bio *bio)
  {
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	if (bio->bi_blkg)
		css = &bio_blkcg(bio)->css;
	else
		css = blkcg_css();

	bio_associate_blkg_from_css(bio, css);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_associate_blkg);

/**
 * bio_clone_blkg_association - clone blkg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
	rcu_read_lock();

	if (src->bi_blkg)
		__bio_associate_blkg(dst, src->bi_blkg);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
  #endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
			    GFP_KERNEL);

	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);

	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);