block/bio.c
  // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   */
  #include <linux/mm.h>
  #include <linux/swap.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/uio.h>
  #include <linux/iocontext.h>
  #include <linux/slab.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/mempool.h>
  #include <linux/workqueue.h>
  #include <linux/cgroup.h>
  #include <linux/blk-cgroup.h>
  #include <linux/highmem.h>

  #include <trace/events/block.h>
  #include "blk.h"
  #include "blk-rq-qos.h"

  /*
   * Test patch to inline a certain number of bi_io_vec's inside the bio
   * itself, to shrink a bio data allocation from two mempool calls to one
   */
  #define BIO_INLINE_VECS		4
  /*
   * if you change this list, also change bvec_alloc or things will
   * break badly! cannot be bigger than what you can fit into an
   * unsigned short
   */
  #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
  };
  #undef BV
  
  /*
   * fs_bio_set is the bio_set containing bio and iovec memory pools used by
   * IO code that does not need private memory pools.
   */
  struct bio_set fs_bio_set;
  EXPORT_SYMBOL(fs_bio_set);

  /*
   * Our slab pool management
   */
  struct bio_slab {
  	struct kmem_cache *slab;
  	unsigned int slab_ref;
  	unsigned int slab_size;
  	char name[8];
  };
  static DEFINE_MUTEX(bio_slab_lock);
  static struct bio_slab *bio_slabs;
  static unsigned int bio_slab_nr, bio_slab_max;
  
  static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  {
  	unsigned int sz = sizeof(struct bio) + extra_size;
  	struct kmem_cache *slab = NULL;
  	struct bio_slab *bslab, *new_bio_slabs;
  	unsigned int new_bio_slab_max;
  	unsigned int i, entry = -1;
  
  	mutex_lock(&bio_slab_lock);
  
  	i = 0;
  	while (i < bio_slab_nr) {
  		bslab = &bio_slabs[i];
  
  		if (!bslab->slab && entry == -1)
  			entry = i;
  		else if (bslab->slab_size == sz) {
  			slab = bslab->slab;
  			bslab->slab_ref++;
  			break;
  		}
  		i++;
  	}
  
  	if (slab)
  		goto out_unlock;
  
  	if (bio_slab_nr == bio_slab_max && entry == -1) {
  		new_bio_slab_max = bio_slab_max << 1;
  		new_bio_slabs = krealloc(bio_slabs,
  					 new_bio_slab_max * sizeof(struct bio_slab),
  					 GFP_KERNEL);
  		if (!new_bio_slabs)
  			goto out_unlock;
  		bio_slab_max = new_bio_slab_max;
  		bio_slabs = new_bio_slabs;
  	}
  	if (entry == -1)
  		entry = bio_slab_nr++;
  
  	bslab = &bio_slabs[entry];
  
  	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
  	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
  				 SLAB_HWCACHE_ALIGN, NULL);
  	if (!slab)
  		goto out_unlock;
  	bslab->slab = slab;
  	bslab->slab_ref = 1;
  	bslab->slab_size = sz;
  out_unlock:
  	mutex_unlock(&bio_slab_lock);
  	return slab;
  }
  
  static void bio_put_slab(struct bio_set *bs)
  {
  	struct bio_slab *bslab = NULL;
  	unsigned int i;
  
  	mutex_lock(&bio_slab_lock);
  
  	for (i = 0; i < bio_slab_nr; i++) {
  		if (bs->bio_slab == bio_slabs[i].slab) {
  			bslab = &bio_slabs[i];
  			break;
  		}
  	}
  
  	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
  		goto out;
  
  	WARN_ON(!bslab->slab_ref);
  
  	if (--bslab->slab_ref)
  		goto out;
  
  	kmem_cache_destroy(bslab->slab);
  	bslab->slab = NULL;
  
  out:
  	mutex_unlock(&bio_slab_lock);
  }
  unsigned int bvec_nr_vecs(unsigned short idx)
  {
  	return bvec_slabs[--idx].nr_vecs;
  }
  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
  {
  	if (!idx)
  		return;
  	idx--;
  
  	BIO_BUG_ON(idx >= BVEC_POOL_NR);

  	if (idx == BVEC_POOL_MAX) {
  		mempool_free(bv, pool);
  	} else {
  		struct biovec_slab *bvs = bvec_slabs + idx;
  
  		kmem_cache_free(bvs->slab, bv);
  	}
  }
  struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
  			   mempool_t *pool)
  {
  	struct bio_vec *bvl;
  
  	/*
  	 * see comment near bvec_array define!
  	 */
  	switch (nr) {
  	case 1:
  		*idx = 0;
  		break;
  	case 2 ... 4:
  		*idx = 1;
  		break;
  	case 5 ... 16:
  		*idx = 2;
  		break;
  	case 17 ... 64:
  		*idx = 3;
  		break;
  	case 65 ... 128:
  		*idx = 4;
  		break;
  	case 129 ... BIO_MAX_PAGES:
  		*idx = 5;
  		break;
  	default:
  		return NULL;
  	}
  
  	/*
  	 * idx now points to the pool we want to allocate from. only the
  	 * 1-vec entry pool is mempool backed.
  	 */
  	if (*idx == BVEC_POOL_MAX) {
  fallback:
  		bvl = mempool_alloc(pool, gfp_mask);
  	} else {
  		struct biovec_slab *bvs = bvec_slabs + *idx;
  		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

  		/*
  		 * Make this allocation restricted and don't dump info on
  		 * allocation failures, since we'll fallback to the mempool
  		 * in case of failure.
  		 */
  		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

  		/*
  		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
  		 * is set, retry with the 1-entry mempool
  		 */
  		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
  		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
  			*idx = BVEC_POOL_MAX;
  			goto fallback;
  		}
  	}
  	(*idx)++;
  	return bvl;
  }
  void bio_uninit(struct bio *bio)
  {
  	bio_disassociate_blkg(bio);
  
  	if (bio_integrity(bio))
  		bio_integrity_free(bio);
  }
  EXPORT_SYMBOL(bio_uninit);

  static void bio_free(struct bio *bio)
  {
  	struct bio_set *bs = bio->bi_pool;
  	void *p;
  	bio_uninit(bio);
  
  	if (bs) {
  		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
  
  		/*
  		 * If we have front padding, adjust the bio pointer before freeing
  		 */
  		p = bio;
  		p -= bs->front_pad;
  		mempool_free(p, &bs->bio_pool);
  	} else {
  		/* Bio was allocated by bio_kmalloc() */
  		kfree(bio);
  	}
  }
  /*
   * Users of this function have their own bio allocation. Subsequently,
   * they must remember to pair any call to bio_init() with bio_uninit()
   * when IO has completed, or when the bio is released.
   */
  void bio_init(struct bio *bio, struct bio_vec *table,
  	      unsigned short max_vecs)
  {
  	memset(bio, 0, sizeof(*bio));
  	atomic_set(&bio->__bi_remaining, 1);
  	atomic_set(&bio->__bi_cnt, 1);
  
  	bio->bi_io_vec = table;
  	bio->bi_max_vecs = max_vecs;
  }
  EXPORT_SYMBOL(bio_init);
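  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): a caller that owns its own bio and biovec storage can pair
   * bio_init() with bio_uninit() as required above.  "bdev", "sector" and
   * "page" are assumed to be supplied by the surrounding code.
   *
   *	struct bio bio;
   *	struct bio_vec bvec;
   *	int ret;
   *
   *	bio_init(&bio, &bvec, 1);
   *	bio_set_dev(&bio, bdev);
   *	bio.bi_iter.bi_sector = sector;
   *	bio.bi_opf = REQ_OP_READ;
   *	bio_add_page(&bio, page, PAGE_SIZE, 0);
   *	ret = submit_bio_wait(&bio);
   *	bio_uninit(&bio);
   */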
  
  /**
   * bio_reset - reinitialize a bio
   * @bio:	bio to reset
   *
   * Description:
   *   After calling bio_reset(), @bio will be in the same state as a freshly
   *   allocated bio returned by bio_alloc_bioset() - the only fields that are
   *   preserved are the ones that are initialized by bio_alloc_bioset(). See
   *   comment in struct bio.
   */
  void bio_reset(struct bio *bio)
  {
  	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
  	bio_uninit(bio);
  
  	memset(bio, 0, BIO_RESET_BYTES);
  	bio->bi_flags = flags;
  	atomic_set(&bio->__bi_remaining, 1);
  }
  EXPORT_SYMBOL(bio_reset);
  static struct bio *__bio_chain_endio(struct bio *bio)
  {
  	struct bio *parent = bio->bi_private;
  	if (!parent->bi_status)
  		parent->bi_status = bio->bi_status;
  	bio_put(bio);
  	return parent;
  }
  
  static void bio_chain_endio(struct bio *bio)
  {
  	bio_endio(__bio_chain_endio(bio));
  }
  
  /**
   * bio_chain - chain bio completions
   * @bio: the target bio
   * @parent: the @bio's parent bio
   *
   * The caller won't have a bi_end_io called when @bio completes - instead,
   * @parent's bi_end_io won't be called until both @parent and @bio have
   * completed; the chained bio will also be freed when it completes.
   *
   * The caller must not set bi_private or bi_end_io in @bio.
   */
  void bio_chain(struct bio *bio, struct bio *parent)
  {
  	BUG_ON(bio->bi_private || bio->bi_end_io);
  
  	bio->bi_private = parent;
  	bio->bi_end_io	= bio_chain_endio;
  	bio_inc_remaining(parent);
  }
  EXPORT_SYMBOL(bio_chain);
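  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): a splitting driver chains the front half to the original bio so
   * the caller sees a single completion.  "sectors" is assumed to come from
   * the driver's splitting logic; real drivers normally pass their own
   * bio_set rather than &fs_bio_set.
   *
   *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);
   *
   *	bio_chain(split, bio);
   *	generic_make_request(split);
   *	(the remainder of "bio" is then resubmitted or completed as usual)
   */
  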
  static void bio_alloc_rescue(struct work_struct *work)
  {
  	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
  	struct bio *bio;
  
  	while (1) {
  		spin_lock(&bs->rescue_lock);
  		bio = bio_list_pop(&bs->rescue_list);
  		spin_unlock(&bs->rescue_lock);
  
  		if (!bio)
  			break;
  
  		generic_make_request(bio);
  	}
  }
  
  static void punt_bios_to_rescuer(struct bio_set *bs)
  {
  	struct bio_list punt, nopunt;
  	struct bio *bio;
  	if (WARN_ON_ONCE(!bs->rescue_workqueue))
  		return;
  	/*
  	 * In order to guarantee forward progress we must punt only bios that
  	 * were allocated from this bio_set; otherwise, if there was a bio on
  	 * there for a stacking driver higher up in the stack, processing it
  	 * could require allocating bios from this bio_set, and doing that from
  	 * our own rescuer would be bad.
  	 *
  	 * Since bio lists are singly linked, pop them all instead of trying to
  	 * remove from the middle of the list:
  	 */
  
  	bio_list_init(&punt);
  	bio_list_init(&nopunt);
  	while ((bio = bio_list_pop(&current->bio_list[0])))
  		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
  	current->bio_list[0] = nopunt;

  	bio_list_init(&nopunt);
  	while ((bio = bio_list_pop(&current->bio_list[1])))
  		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
  	current->bio_list[1] = nopunt;
  
  	spin_lock(&bs->rescue_lock);
  	bio_list_merge(&bs->rescue_list, &punt);
  	spin_unlock(&bs->rescue_lock);
  
  	queue_work(bs->rescue_workqueue, &bs->rescue_work);
  }
  /**
   * bio_alloc_bioset - allocate a bio for I/O
   * @gfp_mask:   the GFP_* mask given to the slab allocator
   * @nr_iovecs:	number of iovecs to pre-allocate
   * @bs:		the bio_set to allocate from.
   *
   * Description:
   *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
   *   backed by the @bs's mempool.
   *
   *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
   *   always be able to allocate a bio. This is due to the mempool guarantees.
   *   To make this work, callers must never allocate more than 1 bio at a time
   *   from this pool. Callers that need to allocate more than 1 bio must always
   *   submit the previously allocated bio for IO before attempting to allocate
   *   a new one. Failure to do so can cause deadlocks under memory pressure.
   *
   *   Note that when running under generic_make_request() (i.e. any block
   *   driver), bios are not submitted until after you return - see the code in
   *   generic_make_request() that converts recursion into iteration, to prevent
   *   stack overflows.
   *
   *   This would normally mean allocating multiple bios under
   *   generic_make_request() would be susceptible to deadlocks, but we have
   *   deadlock avoidance code that resubmits any blocked bios from a rescuer
   *   thread.
   *
   *   However, we do not guarantee forward progress for allocations from other
   *   mempools. Doing multiple allocations from the same mempool under
   *   generic_make_request() should be avoided - instead, use bio_set's front_pad
   *   for per bio allocations.
   *
   *   RETURNS:
   *   Pointer to new bio on success, NULL on failure.
   */
  struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
  			     struct bio_set *bs)
  {
  	gfp_t saved_gfp = gfp_mask;
  	unsigned front_pad;
  	unsigned inline_vecs;
  	struct bio_vec *bvl = NULL;
  	struct bio *bio;
  	void *p;
  	if (!bs) {
  		if (nr_iovecs > UIO_MAXIOV)
  			return NULL;
  
  		p = kmalloc(sizeof(struct bio) +
  			    nr_iovecs * sizeof(struct bio_vec),
  			    gfp_mask);
  		front_pad = 0;
  		inline_vecs = nr_iovecs;
  	} else {
  		/* should not use nobvec bioset for nr_iovecs > 0 */
  		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
  				 nr_iovecs > 0))
  			return NULL;
  		/*
  		 * generic_make_request() converts recursion to iteration; this
  		 * means if we're running beneath it, any bios we allocate and
  		 * submit will not be submitted (and thus freed) until after we
  		 * return.
  		 *
  		 * This exposes us to a potential deadlock if we allocate
  		 * multiple bios from the same bio_set() while running
  		 * underneath generic_make_request(). If we were to allocate
  		 * multiple bios (say a stacking block driver that was splitting
  		 * bios), we would deadlock if we exhausted the mempool's
  		 * reserve.
  		 *
  		 * We solve this, and guarantee forward progress, with a rescuer
  		 * workqueue per bio_set. If we go to allocate and there are
  		 * bios on current->bio_list, we first try the allocation
  		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
  		 * bios we would be blocking to the rescuer workqueue before
  		 * we retry with the original gfp_flags.
  		 */
  		if (current->bio_list &&
  		    (!bio_list_empty(&current->bio_list[0]) ||
  		     !bio_list_empty(&current->bio_list[1])) &&
  		    bs->rescue_workqueue)
  			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

  		p = mempool_alloc(&bs->bio_pool, gfp_mask);
  		if (!p && gfp_mask != saved_gfp) {
  			punt_bios_to_rescuer(bs);
  			gfp_mask = saved_gfp;
  			p = mempool_alloc(&bs->bio_pool, gfp_mask);
  		}
  		front_pad = bs->front_pad;
  		inline_vecs = BIO_INLINE_VECS;
  	}
  	if (unlikely(!p))
  		return NULL;

  	bio = p + front_pad;
  	bio_init(bio, NULL, 0);

  	if (nr_iovecs > inline_vecs) {
  		unsigned long idx = 0;
  		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
  		if (!bvl && gfp_mask != saved_gfp) {
  			punt_bios_to_rescuer(bs);
  			gfp_mask = saved_gfp;
  			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
  		}
  		if (unlikely(!bvl))
  			goto err_free;

  		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
  	} else if (nr_iovecs) {
  		bvl = bio->bi_inline_vecs;
  	}
  
  	bio->bi_pool = bs;
  	bio->bi_max_vecs = nr_iovecs;
  	bio->bi_io_vec = bvl;
  	return bio;
  
  err_free:
  	mempool_free(p, &bs->bio_pool);
  	return NULL;
  }
  EXPORT_SYMBOL(bio_alloc_bioset);
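  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): most filesystem callers go through bio_alloc(), which is
   * bio_alloc_bioset() on &fs_bio_set, so the allocation cannot fail when
   * __GFP_DIRECT_RECLAIM is set.  "bdev", "sector", "page" and "my_end_io"
   * are hypothetical names supplied by the caller.
   *
   *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
   *
   *	bio_set_dev(bio, bdev);
   *	bio->bi_iter.bi_sector = sector;
   *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
   *	bio_add_page(bio, page, PAGE_SIZE, 0);
   *	bio->bi_end_io = my_end_io;
   *	submit_bio(bio);
   */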

  void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
  {
  	unsigned long flags;
  	struct bio_vec bv;
  	struct bvec_iter iter;

  	__bio_for_each_segment(bv, bio, iter, start) {
  		char *data = bvec_kmap_irq(&bv, &flags);
  		memset(data, 0, bv.bv_len);
  		flush_dcache_page(bv.bv_page);
  		bvec_kunmap_irq(data, &flags);
  	}
  }
  EXPORT_SYMBOL(zero_fill_bio_iter);

  /**
   * bio_truncate - truncate the bio to the new size @new_size
   * @bio:	the bio to be truncated
   * @new_size:	new size for truncating the bio
   *
   * Description:
   *   Truncate the bio to the new size @new_size. If bio_op(bio) is
   *   REQ_OP_READ, zero the truncated part. This function should only
   *   be used for handling corner cases, such as bio eod.
   */
  void bio_truncate(struct bio *bio, unsigned new_size)
  {
  	struct bio_vec bv;
  	struct bvec_iter iter;
  	unsigned int done = 0;
  	bool truncated = false;
  
  	if (new_size >= bio->bi_iter.bi_size)
  		return;
  	if (bio_op(bio) != REQ_OP_READ)
  		goto exit;
  
  	bio_for_each_segment(bv, bio, iter) {
  		if (done + bv.bv_len > new_size) {
  			unsigned offset;
  
  			if (!truncated)
  				offset = new_size - done;
  			else
  				offset = 0;
  			zero_user(bv.bv_page, offset, bv.bv_len - offset);
  			truncated = true;
  		}
  		done += bv.bv_len;
  	}
  
   exit:
  	/*
  	 * Don't touch bvec table here and make it really immutable, since
  	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
  	 * in its .end_bio() callback.
  	 *
  	 * It is enough to truncate bio by updating .bi_size since we can make
  	 * correct bvec with the updated .bi_size for drivers.
  	 */
  	bio->bi_iter.bi_size = new_size;
  }
  /**
   * bio_put - release a reference to a bio
   * @bio:   bio to release reference to
   *
   * Description:
   *   Put a reference to a &struct bio, either one you have gotten with
   *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
   **/
  void bio_put(struct bio *bio)
  {
  	if (!bio_flagged(bio, BIO_REFFED))
  		bio_free(bio);
  	else {
  		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
  
  		/*
  		 * last put frees it
  		 */
  		if (atomic_dec_and_test(&bio->__bi_cnt))
  			bio_free(bio);
  	}
  }
  EXPORT_SYMBOL(bio_put);

  /**
   * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
   * 	@bio: destination bio
   * 	@bio_src: bio to clone
   *
   *	Clone a &bio. Caller will own the returned bio, but not
   *	the actual data it points to. Reference count of returned
   * 	bio will be one.
   *
   * 	Caller must ensure that @bio_src is not freed before @bio.
   */
  void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
  {
  	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
  
  	/*
  	 * most users will be overriding ->bi_disk with a new target,
  	 * so we don't set nor calculate new physical/hw segment counts here
  	 */
  	bio->bi_disk = bio_src->bi_disk;
  	bio->bi_partno = bio_src->bi_partno;
  	bio_set_flag(bio, BIO_CLONED);
  	if (bio_flagged(bio_src, BIO_THROTTLED))
  		bio_set_flag(bio, BIO_THROTTLED);
  	bio->bi_opf = bio_src->bi_opf;
  	bio->bi_ioprio = bio_src->bi_ioprio;
  	bio->bi_write_hint = bio_src->bi_write_hint;
  	bio->bi_iter = bio_src->bi_iter;
  	bio->bi_io_vec = bio_src->bi_io_vec;

  	bio_clone_blkg_association(bio, bio_src);
  	blkcg_bio_issue_init(bio);
  }
  EXPORT_SYMBOL(__bio_clone_fast);
  
  /**
   *	bio_clone_fast - clone a bio that shares the original bio's biovec
   *	@bio: bio to clone
   *	@gfp_mask: allocation priority
   *	@bs: bio_set to allocate from
   *
   * 	Like __bio_clone_fast, only also allocates the returned bio
   */
  struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
  {
  	struct bio *b;
  
  	b = bio_alloc_bioset(gfp_mask, 0, bs);
  	if (!b)
  		return NULL;
  
  	__bio_clone_fast(b, bio);
  
  	if (bio_integrity(bio)) {
  		int ret;
  
  		ret = bio_integrity_clone(b, bio, gfp_mask);
  
  		if (ret < 0) {
  			bio_put(b);
  			return NULL;
  		}
  	}
  
  	return b;
  }
  EXPORT_SYMBOL(bio_clone_fast);
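  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): stacking drivers clone from a private bio_set so the clone shares
   * the source biovec but carries its own completion.  "tc", "tc->bs" and
   * "clone_endio" are hypothetical driver names.
   *
   *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &tc->bs);
   *
   *	if (!clone)
   *		return -ENOMEM;
   *	clone->bi_end_io = clone_endio;
   *	clone->bi_private = tc;
   *	submit_bio(clone);
   */
  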
  static inline bool page_is_mergeable(const struct bio_vec *bv,
  		struct page *page, unsigned int len, unsigned int off,
  		bool *same_page)
  {
  	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
  		bv->bv_offset + bv->bv_len - 1;
  	phys_addr_t page_addr = page_to_phys(page);
  
  	if (vec_end_addr + 1 != page_addr + off)
  		return false;
  	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
  		return false;

  	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
  	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
  		return false;
  	return true;
  }
  static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
  		struct page *page, unsigned len, unsigned offset,
  		bool *same_page)
  {
  	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
  	unsigned long mask = queue_segment_boundary(q);
  	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
  	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
  
  	if ((addr1 | mask) != (addr2 | mask))
  		return false;
  	if (bv->bv_len + len > queue_max_segment_size(q))
  		return false;
  	return __bio_try_merge_page(bio, page, len, offset, same_page);
  }
  /**
   *	__bio_add_pc_page	- attempt to add page to passthrough bio
   *	@q: the target queue
   *	@bio: destination bio
   *	@page: page to add
   *	@len: vec entry length
   *	@offset: vec entry offset
   *	@same_page: return if the merge happened inside the same page
   *
   *	Attempt to add a page to the bio_vec maplist. This can fail for a
   *	number of reasons, such as the bio being full or target block device
   *	limitations. The target block device must allow bio's up to PAGE_SIZE,
   *	so it is always possible to add a single page to an empty bio.
   *
   *	This should only be used by passthrough bios.
   */
  static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
  		struct page *page, unsigned int len, unsigned int offset,
  		bool *same_page)
  {
  	struct bio_vec *bvec;
  
  	/*
  	 * cloned bio must not modify vec list
  	 */
  	if (unlikely(bio_flagged(bio, BIO_CLONED)))
  		return 0;
  	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
  		return 0;
  	if (bio->bi_vcnt > 0) {
  		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
  			return len;
  
  		/*
  		 * If the queue doesn't support SG gaps and adding this segment
  		 * would create a gap, disallow it.
  		 */
  		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
  		if (bvec_gap_to_prev(q, bvec, offset))
  			return 0;
  	}
  	if (bio_full(bio, len))
  		return 0;
  	if (bio->bi_vcnt >= queue_max_segments(q))
  		return 0;
  	bvec = &bio->bi_io_vec[bio->bi_vcnt];
  	bvec->bv_page = page;
  	bvec->bv_len = len;
  	bvec->bv_offset = offset;
  	bio->bi_vcnt++;
  	bio->bi_iter.bi_size += len;
  	return len;
  }
  
  int bio_add_pc_page(struct request_queue *q, struct bio *bio,
  		struct page *page, unsigned int len, unsigned int offset)
  {
  	bool same_page = false;
  	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
  }
  EXPORT_SYMBOL(bio_add_pc_page);
  
  /**
   * __bio_try_merge_page - try appending data to an existing bvec.
   * @bio: destination bio
   * @page: start page to add
   * @len: length of the data to add
   * @off: offset of the data relative to @page
   * @same_page: return if the segment has been merged inside the same page
   *
   * Try to add the data at @page + @off to the last bvec of @bio.  This is a
   * useful optimisation for file systems with a block size smaller than the
   * page size.
   *
   * Warn if (@len, @off) crosses pages in case that @same_page is true.
   *
   * Return %true on success or %false on failure.
   */
  bool __bio_try_merge_page(struct bio *bio, struct page *page,
  		unsigned int len, unsigned int off, bool *same_page)
  {
  	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
  		return false;

  	if (bio->bi_vcnt > 0) {
  		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
  
  		if (page_is_mergeable(bv, page, len, off, same_page)) {
  			if (bio->bi_iter.bi_size > UINT_MAX - len)
  				return false;
  			bv->bv_len += len;
  			bio->bi_iter.bi_size += len;
  			return true;
  		}
  	}
  	return false;
  }
  EXPORT_SYMBOL_GPL(__bio_try_merge_page);

  /**
   * __bio_add_page - add page(s) to a bio in a new segment
   * @bio: destination bio
   * @page: start page to add
   * @len: length of the data to add, may cross pages
   * @off: offset of the data relative to @page, may cross pages
   *
   * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
   * that @bio has space for another bvec.
   */
  void __bio_add_page(struct bio *bio, struct page *page,
  		unsigned int len, unsigned int off)
  {
  	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

  	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
  	WARN_ON_ONCE(bio_full(bio, len));
  
  	bv->bv_page = page;
  	bv->bv_offset = off;
  	bv->bv_len = len;

  	bio->bi_iter.bi_size += len;
  	bio->bi_vcnt++;
  
  	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
  		bio_set_flag(bio, BIO_WORKINGSET);
  }
  EXPORT_SYMBOL_GPL(__bio_add_page);
  
  /**
   *	bio_add_page	-	attempt to add page(s) to bio
   *	@bio: destination bio
   *	@page: start page to add
   *	@len: vec entry length, may cross pages
   *	@offset: vec entry offset relative to @page, may cross pages
   *
   *	Attempt to add page(s) to the bio_vec maplist. This will only fail
   *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
   */
  int bio_add_page(struct bio *bio, struct page *page,
  		 unsigned int len, unsigned int offset)
  {
  	bool same_page = false;
  
  	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  		if (bio_full(bio, len))
  			return 0;
  		__bio_add_page(bio, page, len, offset);
  	}
  	return len;
  }
  EXPORT_SYMBOL(bio_add_page);
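  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): building a multi-page bio.  bio_add_page() returns the number of
   * bytes added, so a short return means the bio is full and must be
   * submitted before more pages can be added.  "pages" and "nr_pages" are
   * assumptions supplied by the caller.
   *
   *	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);
   *	int i;
   *
   *	for (i = 0; i < nr_pages; i++)
   *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
   *			break;	(bio is full: submit it and allocate a new one)
   */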

  void bio_release_pages(struct bio *bio, bool mark_dirty)
  {
  	struct bvec_iter_all iter_all;
  	struct bio_vec *bvec;

  	if (bio_flagged(bio, BIO_NO_PAGE_REF))
  		return;
  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		if (mark_dirty && !PageCompound(bvec->bv_page))
  			set_page_dirty_lock(bvec->bv_page);
  		put_page(bvec->bv_page);
  	}
  }
  static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
  {
  	const struct bio_vec *bv = iter->bvec;
  	unsigned int len;
  	size_t size;
  
  	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
  		return -EINVAL;
  
  	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
  	size = bio_add_page(bio, bv->bv_page, len,
  				bv->bv_offset + iter->iov_offset);
  	if (unlikely(size != len))
  		return -EINVAL;
  	iov_iter_advance(iter, size);
  	return 0;
  }
  #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
  /**
   * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
   * @bio: bio to add pages to
   * @iter: iov iterator describing the region to be mapped
   *
   * Pins pages from *iter and appends them to @bio's bvec array. The
   * pages will have to be released using put_page() when done.
   * For multi-segment *iter, this function only adds pages from the
   * next non-empty segment of the iov iterator.
   */
  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  {
  	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  	struct page **pages = (struct page **)bv;
  	bool same_page = false;
  	ssize_t size, left;
  	unsigned len, i;
  	size_t offset;
  
  	/*
  	 * Move page array up in the allocated memory for the bio vecs as far as
  	 * possible so that we can start filling biovecs from the beginning
  	 * without overwriting the temporary page array.
  	*/
  	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
  	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
  
  	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
  	if (unlikely(size <= 0))
  		return size ? size : -EFAULT;

  	for (left = size, i = 0; left > 0; left -= len, i++) {
  		struct page *page = pages[i];

  		len = min_t(size_t, PAGE_SIZE - offset, left);
  
  		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  			if (same_page)
  				put_page(page);
  		} else {
  			if (WARN_ON_ONCE(bio_full(bio, len)))
  				return -EINVAL;
  			__bio_add_page(bio, page, len, offset);
  		}
  		offset = 0;
  	}
  	iov_iter_advance(iter, size);
  	return 0;
  }
  
  /**
   * bio_iov_iter_get_pages - add user or kernel pages to a bio
   * @bio: bio to add pages to
   * @iter: iov iterator describing the region to be added
   *
   * This takes either an iterator pointing to user memory, or one pointing to
   * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
   * map them into the kernel. On IO completion, the caller should put those
   * pages. If we're adding kernel pages, and the caller told us it's safe to
   * do so, we just have to add the pages to the bio directly. We don't grab an
   * extra reference to those pages (the user should already have that), and we
   * don't put the page on IO completion. The caller needs to check if the bio is
   * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
   * released.
   *
   * The function tries, but does not guarantee, to pin as many pages as
   * fit into the bio, or are requested in *iter, whatever is smaller. If
   * MM encounters an error pinning the requested pages, it stops. Error
   * is returned only if 0 pages could be pinned.
   */
  int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  {
  	const bool is_bvec = iov_iter_is_bvec(iter);
  	int ret;
  
  	if (WARN_ON_ONCE(bio->bi_vcnt))
  		return -EINVAL;
  
  	do {
  		if (is_bvec)
  			ret = __bio_iov_bvec_add_pages(bio, iter);
  		else
  			ret = __bio_iov_iter_get_pages(bio, iter);
  	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

  	if (is_bvec)
  		bio_set_flag(bio, BIO_NO_PAGE_REF);
  	return bio->bi_vcnt ? 0 : ret;
  }
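  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): direct I/O callers size the bio from the iterator and let this
   * helper pin the pages.  "iter", "bdev" and "pos" are set up by the caller.
   *
   *	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
   *	bio = bio_alloc(GFP_KERNEL, nr_pages);
   *	bio_set_dev(bio, bdev);
   *	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
   *	bio->bi_opf = REQ_OP_READ;
   *
   *	ret = bio_iov_iter_get_pages(bio, iter);
   *	if (unlikely(ret))
   *		bio_put(bio);
   *	else
   *		submit_bio(bio);
   */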

  static void submit_bio_wait_endio(struct bio *bio)
  {
  	complete(bio->bi_private);
  }
  
  /**
   * submit_bio_wait - submit a bio, and wait until it completes
   * @bio: The &struct bio which describes the I/O
   *
   * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
   * bio_endio() on failure.
   *
   * WARNING: unlike how submit_bio() is usually used, this function does not
   * result in the bio reference being consumed. The caller must drop the
   * reference on their own.
   */
  int submit_bio_wait(struct bio *bio)
  {
  	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

  	bio->bi_private = &done;
  	bio->bi_end_io = submit_bio_wait_endio;
  	bio->bi_opf |= REQ_SYNC;
  	submit_bio(bio);
  	wait_for_completion_io(&done);

  	return blk_status_to_errno(bio->bi_status);
  }
  EXPORT_SYMBOL(submit_bio_wait);
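  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): synchronously writing one zero page at "sector" on "bdev".  Note
   * the explicit bio_put(): as documented above, submit_bio_wait() does not
   * consume the caller's reference.
   *
   *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
   *	int err;
   *
   *	bio_set_dev(bio, bdev);
   *	bio->bi_iter.bi_sector = sector;
   *	bio->bi_opf = REQ_OP_WRITE;
   *	bio_add_page(bio, ZERO_PAGE(0), PAGE_SIZE, 0);
   *	err = submit_bio_wait(bio);
   *	bio_put(bio);
   */
  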
  /**
   * bio_advance - increment/complete a bio by some number of bytes
   * @bio:	bio to advance
   * @bytes:	number of bytes to complete
   *
   * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
   * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
   * be updated on the last bvec as well.
   *
   * @bio will then represent the remaining, uncompleted portion of the io.
   */
  void bio_advance(struct bio *bio, unsigned bytes)
  {
  	if (bio_integrity(bio))
  		bio_integrity_advance(bio, bytes);
  	bio_advance_iter(bio, &bio->bi_iter, bytes);
  }
  EXPORT_SYMBOL(bio_advance);
  void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
  			struct bio *src, struct bvec_iter *src_iter)
  {
  	struct bio_vec src_bv, dst_bv;
  	void *src_p, *dst_p;
  	unsigned bytes;

  	while (src_iter->bi_size && dst_iter->bi_size) {
  		src_bv = bio_iter_iovec(src, *src_iter);
  		dst_bv = bio_iter_iovec(dst, *dst_iter);
  
  		bytes = min(src_bv.bv_len, dst_bv.bv_len);

  		src_p = kmap_atomic(src_bv.bv_page);
  		dst_p = kmap_atomic(dst_bv.bv_page);

  		memcpy(dst_p + dst_bv.bv_offset,
  		       src_p + src_bv.bv_offset,
  		       bytes);
  
  		kunmap_atomic(dst_p);
  		kunmap_atomic(src_p);
  		flush_dcache_page(dst_bv.bv_page);
  		bio_advance_iter(src, src_iter, bytes);
  		bio_advance_iter(dst, dst_iter, bytes);
  	}
  }
  EXPORT_SYMBOL(bio_copy_data_iter);
  
  /**
   * bio_copy_data - copy contents of data buffers from one bio to another
   * @src: source bio
   * @dst: destination bio
   *
   * Stops when it reaches the end of either @src or @dst - that is, copies
   * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
   */
  void bio_copy_data(struct bio *dst, struct bio *src)
  {
  	struct bvec_iter src_iter = src->bi_iter;
  	struct bvec_iter dst_iter = dst->bi_iter;
  
  	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  }
  EXPORT_SYMBOL(bio_copy_data);
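  
  /*
   * Illustrative usage sketch (editorial addition, not part of the original
   * file): a driver that bounced a READ into "bounce_bio" copies the payload
   * back into the original "bio" once the bounce I/O completes.
   *
   *	bio_copy_data(bio, bounce_bio);
   */
  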
  /**
   * bio_list_copy_data - copy contents of data buffers from one chain of bios to
   * another
   * @src: source bio list
   * @dst: destination bio list
   *
   * Stops when it reaches the end of either the @src list or @dst list - that is,
   * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
   * bios).
   */
  void bio_list_copy_data(struct bio *dst, struct bio *src)
  {
  	struct bvec_iter src_iter = src->bi_iter;
  	struct bvec_iter dst_iter = dst->bi_iter;
  
  	while (1) {
  		if (!src_iter.bi_size) {
  			src = src->bi_next;
  			if (!src)
  				break;
  
  			src_iter = src->bi_iter;
  		}
  
  		if (!dst_iter.bi_size) {
  			dst = dst->bi_next;
  			if (!dst)
  				break;
  
  			dst_iter = dst->bi_iter;
  		}
  
  		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  	}
  }
  EXPORT_SYMBOL(bio_list_copy_data);
  struct bio_map_data {
  	int is_our_pages;
  	struct iov_iter iter;
  	struct iovec iov[];
  };
  static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
  					       gfp_t gfp_mask)
  {
  	struct bio_map_data *bmd;
  	if (data->nr_segs > UIO_MAXIOV)
  		return NULL;

  	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
  	if (!bmd)
  		return NULL;
  	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
  	bmd->iter = *data;
  	bmd->iter.iov = bmd->iov;
  	return bmd;
  }
  /**
   * bio_copy_from_iter - copy all pages from iov_iter to bio
   * @bio: The &struct bio which describes the I/O as destination
   * @iter: iov_iter as source
   *
   * Copy all pages from iov_iter to bio.
   * Returns 0 on success, or error on failure.
   */
  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
  {
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		ssize_t ret;

  		ret = copy_page_from_iter(bvec->bv_page,
  					  bvec->bv_offset,
  					  bvec->bv_len,
  					  iter);

  		if (!iov_iter_count(iter))
  			break;
  
  		if (ret < bvec->bv_len)
  			return -EFAULT;
  	}
  	return 0;
  }
  
  /**
   * bio_copy_to_iter - copy all pages from bio to iov_iter
   * @bio: The &struct bio which describes the I/O as source
   * @iter: iov_iter as destination
   *
   * Copy all pages from bio to iov_iter.
   * Returns 0 on success, or error on failure.
   */
  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
  {
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		ssize_t ret;
  
  		ret = copy_page_to_iter(bvec->bv_page,
  					bvec->bv_offset,
  					bvec->bv_len,
  					&iter);
  
  		if (!iov_iter_count(&iter))
  			break;
  
  		if (ret < bvec->bv_len)
  			return -EFAULT;
  	}
  
  	return 0;
  }
  void bio_free_pages(struct bio *bio)
  {
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all)
  		__free_page(bvec->bv_page);
  }
  EXPORT_SYMBOL(bio_free_pages);
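
  /*
   * Example (illustrative sketch): an error path for a bio whose pages were
   * allocated by the caller (e.g. with alloc_page() + bio_add_page()), as in
   * the copy paths below.  Freeing order is pages first, then the bio.
   *
   *	bio_free_pages(bio);
   *	bio_put(bio);
   */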

  /**
   *	bio_uncopy_user	-	finish previously mapped bio
   *	@bio: bio being terminated
   *
   *	Free pages allocated from bio_copy_user_iov() and write back data
   *	to user space in case of a read.
   */
  int bio_uncopy_user(struct bio *bio)
  {
  	struct bio_map_data *bmd = bio->bi_private;
  	int ret = 0;

  	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
  		/*
  		 * if we're in a workqueue, the request is orphaned, so
  		 * don't copy into a random user address space, just free
  		 * and return -EINTR so user space doesn't expect any data.
  		 */
  		if (!current->mm)
  			ret = -EINTR;
  		else if (bio_data_dir(bio) == READ)
  			ret = bio_copy_to_iter(bio, bmd->iter);
  		if (bmd->is_our_pages)
  			bio_free_pages(bio);
  	}
  	kfree(bmd);
  	bio_put(bio);
  	return ret;
  }
  
  /**
   *	bio_copy_user_iov	-	copy user data to bio
   *	@q:		destination block queue
   *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
   *	@iter:		iovec iterator
   *	@gfp_mask:	memory allocation flags
   *
   *	Prepares and returns a bio for indirect user io, bouncing data
   *	to/from kernel pages as necessary. Must be paired with a call
   *	to bio_uncopy_user() on io completion.
   */
  struct bio *bio_copy_user_iov(struct request_queue *q,
  			      struct rq_map_data *map_data,
  			      struct iov_iter *iter,
  			      gfp_t gfp_mask)
  {
  	struct bio_map_data *bmd;
  	struct page *page;
  	struct bio *bio;
  	int i = 0, ret;
  	int nr_pages;
  	unsigned int len = iter->count;
  	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

  	bmd = bio_alloc_map_data(iter, gfp_mask);
  	if (!bmd)
  		return ERR_PTR(-ENOMEM);
  	/*
  	 * We need to do a deep copy of the iov_iter including the iovecs.
  	 * The caller provided iov might point to an on-stack or otherwise
  	 * shortlived one.
  	 */
  	bmd->is_our_pages = map_data ? 0 : 1;

  	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
  	if (nr_pages > BIO_MAX_PAGES)
  		nr_pages = BIO_MAX_PAGES;

  	ret = -ENOMEM;
  	bio = bio_kmalloc(gfp_mask, nr_pages);
  	if (!bio)
  		goto out_bmd;
  	ret = 0;
  
  	if (map_data) {
  		nr_pages = 1 << map_data->page_order;
  		i = map_data->offset / PAGE_SIZE;
  	}
  	while (len) {
  		unsigned int bytes = PAGE_SIZE;

  		bytes -= offset;
  		if (bytes > len)
  			bytes = len;
  		if (map_data) {
  			if (i == map_data->nr_entries * nr_pages) {
  				ret = -ENOMEM;
  				break;
  			}
  
  			page = map_data->pages[i / nr_pages];
  			page += (i % nr_pages);
  
  			i++;
  		} else {
  			page = alloc_page(q->bounce_gfp | gfp_mask);
  			if (!page) {
  				ret = -ENOMEM;
  				break;
  			}
  		}
  		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
  			if (!map_data)
  				__free_page(page);
  			break;
  		}
  
  		len -= bytes;
  		offset = 0;
  	}
  
  	if (ret)
  		goto cleanup;
  	if (map_data)
  		map_data->offset += bio->bi_iter.bi_size;
  	/*
  	 * success
  	 */
  	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
  	    (map_data && map_data->from_user)) {
  		ret = bio_copy_from_iter(bio, iter);
  		if (ret)
  			goto cleanup;
  	} else {
  		if (bmd->is_our_pages)
  			zero_fill_bio(bio);
  		iov_iter_advance(iter, bio->bi_iter.bi_size);
  	}
  	bio->bi_private = bmd;
  	if (map_data && map_data->null_mapped)
  		bio_set_flag(bio, BIO_NULL_MAPPED);
  	return bio;
  cleanup:
  	if (!map_data)
  		bio_free_pages(bio);
  	bio_put(bio);
  out_bmd:
  	kfree(bmd);
  	return ERR_PTR(ret);
  }
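
  /*
   * Example (illustrative sketch): the copy-based mapping path as used by
   * passthrough helpers such as blk_rq_map_user_iov().  Error handling and
   * request setup are omitted; @q and @iter are hypothetical here.
   *
   *	bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
   *	if (IS_ERR(bio))
   *		return PTR_ERR(bio);
   *	...			// attach to a request and wait for completion
   *	ret = bio_uncopy_user(bio);	// copies READ data back and frees
   */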
  /**
   *	bio_map_user_iov - map user iovec into bio
   *	@q:		the struct request_queue for the bio
   *	@iter:		iovec iterator
   *	@gfp_mask:	memory allocation flags
   *
   *	Map the user space address into a bio suitable for io to a block
   *	device. Returns an error pointer in case of error.
   */
  struct bio *bio_map_user_iov(struct request_queue *q,
  			     struct iov_iter *iter,
  			     gfp_t gfp_mask)
  {
  	int j;
  	struct bio *bio;
  	int ret;

  	if (!iov_iter_count(iter))
  		return ERR_PTR(-EINVAL);
  	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
  	if (!bio)
  		return ERR_PTR(-ENOMEM);
  	while (iov_iter_count(iter)) {
  		struct page **pages;
  		ssize_t bytes;
  		size_t offs, added = 0;
  		int npages;

  		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
  		if (unlikely(bytes <= 0)) {
  			ret = bytes ? bytes : -EFAULT;
  			goto out_unmap;
  		}

  		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

  		if (unlikely(offs & queue_dma_alignment(q))) {
  			ret = -EINVAL;
  			j = 0;
  		} else {
  			for (j = 0; j < npages; j++) {
  				struct page *page = pages[j];
  				unsigned int n = PAGE_SIZE - offs;
  				bool same_page = false;

  				if (n > bytes)
  					n = bytes;

  				if (!__bio_add_pc_page(q, bio, page, n, offs,
  						&same_page)) {
  					if (same_page)
  						put_page(page);
  					break;
  				}

  				added += n;
  				bytes -= n;
  				offs = 0;
  			}
  			iov_iter_advance(iter, added);
  		}
  		/*
  		 * release the pages we didn't map into the bio, if any
  		 */
  		while (j < npages)
  			put_page(pages[j++]);
  		kvfree(pages);
  		/* couldn't stuff something into bio? */
  		if (bytes)
  			break;
  	}
  	bio_set_flag(bio, BIO_USER_MAPPED);
  
  	/*
  	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
  	 * it would normally disappear when its bi_end_io is run.
  	 * however, we need it for the unmap, so grab an extra
  	 * reference to it
  	 */
  	bio_get(bio);
  	return bio;
  
   out_unmap:
  	bio_release_pages(bio, false);
  	bio_put(bio);
  	return ERR_PTR(ret);
  }
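
  /*
   * Example (illustrative sketch): zero-copy mapping of user memory for a
   * passthrough request.  @q and @iter are hypothetical; the pairing with
   * bio_unmap_user() after completion is the important part.
   *
   *	bio = bio_map_user_iov(q, iter, GFP_KERNEL);
   *	if (IS_ERR(bio))
   *		return PTR_ERR(bio);
   *	...			// submit and wait for the I/O
   *	bio_unmap_user(bio);	// process context only, may sleep
   */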
  /**
   *	bio_unmap_user	-	unmap a bio
   *	@bio:		the bio being unmapped
   *
   *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
   *	process context.
   *
   *	bio_unmap_user() may sleep.
   */
  void bio_unmap_user(struct bio *bio)
  {
  	bio_release_pages(bio, bio_data_dir(bio) == READ);
  	bio_put(bio);
  	bio_put(bio);	/* second put pairs with the extra bio_get() in bio_map_user_iov() */
  }
  static void bio_invalidate_vmalloc_pages(struct bio *bio)
  {
  #ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
  	if (bio->bi_private && !op_is_write(bio_op(bio))) {
  		unsigned long i, len = 0;
  
  		for (i = 0; i < bio->bi_vcnt; i++)
  			len += bio->bi_io_vec[i].bv_len;
  		invalidate_kernel_vmap_range(bio->bi_private, len);
  	}
  #endif
  }
  static void bio_map_kern_endio(struct bio *bio)
  {
  	bio_invalidate_vmalloc_pages(bio);
  	bio_put(bio);
  }
  /**
   *	bio_map_kern	-	map kernel address into bio
   *	@q: the struct request_queue for the bio
   *	@data: pointer to buffer to map
   *	@len: length in bytes
   *	@gfp_mask: allocation flags for bio allocation
   *
   *	Map the kernel address into a bio suitable for io to a block
   *	device. Returns an error pointer in case of error.
   */
  struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
  			 gfp_t gfp_mask)
  {
  	unsigned long kaddr = (unsigned long)data;
  	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  	unsigned long start = kaddr >> PAGE_SHIFT;
  	const int nr_pages = end - start;
  	bool is_vmalloc = is_vmalloc_addr(data);
  	struct page *page;
  	int offset, i;
  	struct bio *bio;
  	bio = bio_kmalloc(gfp_mask, nr_pages);
  	if (!bio)
  		return ERR_PTR(-ENOMEM);
  	if (is_vmalloc) {
  		flush_kernel_vmap_range(data, len);
  		bio->bi_private = data;
  	}
  	offset = offset_in_page(kaddr);
  	for (i = 0; i < nr_pages; i++) {
  		unsigned int bytes = PAGE_SIZE - offset;
  
  		if (len <= 0)
  			break;
  
  		if (bytes > len)
  			bytes = len;
  		if (!is_vmalloc)
  			page = virt_to_page(data);
  		else
  			page = vmalloc_to_page(data);
  		if (bio_add_pc_page(q, bio, page, bytes,
  				    offset) < bytes) {
  			/* we don't support partial mappings */
  			bio_put(bio);
  			return ERR_PTR(-EINVAL);
  		}
  
  		data += bytes;
  		len -= bytes;
  		offset = 0;
  	}
  	bio->bi_end_io = bio_map_kern_endio;
  	return bio;
  }
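
  /*
   * Example (illustrative sketch): wrapping a kernel buffer for a
   * passthrough request, as callers such as blk_rq_map_kern() do.  @q, @buf
   * and @len are hypothetical.
   *
   *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
   *	if (IS_ERR(bio))
   *		return PTR_ERR(bio);
   */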

  static void bio_copy_kern_endio(struct bio *bio)
  {
  	bio_free_pages(bio);
  	bio_put(bio);
  }
  static void bio_copy_kern_endio_read(struct bio *bio)
  {
  	char *p = bio->bi_private;
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
  		p += bvec->bv_len;
  	}
  	bio_copy_kern_endio(bio);
  }
  
  /**
   *	bio_copy_kern	-	copy kernel address into bio
   *	@q: the struct request_queue for the bio
   *	@data: pointer to buffer to copy
   *	@len: length in bytes
   *	@gfp_mask: allocation flags for bio and page allocation
   *	@reading: data direction is READ
   *
   *	Copy the kernel address into a bio suitable for io to a block
   *	device. Returns an error pointer in case of error.
   */
  struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
  			  gfp_t gfp_mask, int reading)
  {
  	unsigned long kaddr = (unsigned long)data;
  	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
  	unsigned long start = kaddr >> PAGE_SHIFT;
  	struct bio *bio;
  	void *p = data;
  	int nr_pages = 0;

  	/*
  	 * Overflow, abort
  	 */
  	if (end < start)
  		return ERR_PTR(-EINVAL);

  	nr_pages = end - start;
  	bio = bio_kmalloc(gfp_mask, nr_pages);
  	if (!bio)
  		return ERR_PTR(-ENOMEM);

  	while (len) {
  		struct page *page;
  		unsigned int bytes = PAGE_SIZE;

  		if (bytes > len)
  			bytes = len;
  
  		page = alloc_page(q->bounce_gfp | gfp_mask);
  		if (!page)
  			goto cleanup;
  
  		if (!reading)
  			memcpy(page_address(page), p, bytes);
  
  		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
  			break;
  
  		len -= bytes;
  		p += bytes;
  	}
  	if (reading) {
  		bio->bi_end_io = bio_copy_kern_endio_read;
  		bio->bi_private = data;
  	} else {
  		bio->bi_end_io = bio_copy_kern_endio;
  	}

  	return bio;
  
  cleanup:
  	bio_free_pages(bio);
  	bio_put(bio);
  	return ERR_PTR(-ENOMEM);
  }
  /*
   * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
   * for performing direct-IO in BIOs.
   *
   * The problem is that we cannot run set_page_dirty() from interrupt context
   * because the required locks are not interrupt-safe.  So what we can do is to
   * mark the pages dirty _before_ performing IO.  And in interrupt context,
   * check that the pages are still dirty.   If so, fine.  If not, redirty them
   * in process context.
   *
   * We special-case compound pages here: normally this means reads into hugetlb
   * pages.  The logic in here doesn't really work right for compound pages
   * because the VM does not uniformly chase down the head page in all cases.
   * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
   * handle them at all.  So we skip compound pages here at an early stage.
   *
   * Note that this code is very hard to test under normal circumstances because
   * direct-io pins the pages with get_user_pages().  This makes
   * is_page_cache_freeable return false, and the VM will not clean the pages.
   * But other code (e.g., flusher threads) could clean the pages if they are mapped
   * pagecache.
   *
   * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
   * deferred bio dirtying paths.
   */
  
  /*
   * bio_set_pages_dirty() will mark all the bio's pages as dirty.
   */
  void bio_set_pages_dirty(struct bio *bio)
  {
  	struct bio_vec *bvec;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		if (!PageCompound(bvec->bv_page))
  			set_page_dirty_lock(bvec->bv_page);
  	}
  }
  /*
   * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
   * If they are, then fine.  If, however, some pages are clean then they must
   * have been written out during the direct-IO read.  So we take another ref on
   * the BIO and re-dirty the pages in process context.
   *
   * It is expected that bio_check_pages_dirty() will wholly own the BIO from
   * here on.  It will run one put_page() against each page and will run one
   * bio_put() against the BIO.
   */
  static void bio_dirty_fn(struct work_struct *work);

  static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
  static DEFINE_SPINLOCK(bio_dirty_lock);
  static struct bio *bio_dirty_list;
  
  /*
   * This runs in process context
   */
  static void bio_dirty_fn(struct work_struct *work)
  {
  	struct bio *bio, *next;

  	spin_lock_irq(&bio_dirty_lock);
  	next = bio_dirty_list;
  	bio_dirty_list = NULL;
  	spin_unlock_irq(&bio_dirty_lock);

  	while ((bio = next) != NULL) {
  		next = bio->bi_private;

  		bio_release_pages(bio, true);
  		bio_put(bio);
  	}
  }
  
  void bio_check_pages_dirty(struct bio *bio)
  {
  	struct bio_vec *bvec;
  	unsigned long flags;
  	struct bvec_iter_all iter_all;

  	bio_for_each_segment_all(bvec, bio, iter_all) {
  		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
  			goto defer;
  	}
  	bio_release_pages(bio, false);
  	bio_put(bio);
  	return;
  defer:
  	spin_lock_irqsave(&bio_dirty_lock, flags);
  	bio->bi_private = bio_dirty_list;
  	bio_dirty_list = bio;
  	spin_unlock_irqrestore(&bio_dirty_lock, flags);
  	schedule_work(&bio_dirty_work);
  }
  void update_io_ticks(struct hd_struct *part, unsigned long now)
  {
  	unsigned long stamp;
  again:
  	stamp = READ_ONCE(part->stamp);
  	if (unlikely(stamp != now)) {
  		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
  			__part_stat_add(part, io_ticks, 1);
  		}
  	}
  	if (part->partno) {
  		part = &part_to_disk(part)->part0;
  		goto again;
  	}
  }

  void generic_start_io_acct(struct request_queue *q, int op,
  			   unsigned long sectors, struct hd_struct *part)
  {
  	const int sgrp = op_stat_group(op);

  	part_stat_lock();
  	update_io_ticks(part, jiffies);
  	part_stat_inc(part, ios[sgrp]);
  	part_stat_add(part, sectors[sgrp], sectors);
  	part_inc_in_flight(q, part, op_is_write(op));
  
  	part_stat_unlock();
  }
  EXPORT_SYMBOL(generic_start_io_acct);
  void generic_end_io_acct(struct request_queue *q, int req_op,
  			 struct hd_struct *part, unsigned long start_time)
  {
  	unsigned long now = jiffies;
  	unsigned long duration = now - start_time;
  	const int sgrp = op_stat_group(req_op);

  	part_stat_lock();
  	update_io_ticks(part, now);
  	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
  	part_stat_add(part, time_in_queue, duration);
  	part_dec_in_flight(q, part, op_is_write(req_op));
  
  	part_stat_unlock();
  }
  EXPORT_SYMBOL(generic_end_io_acct);
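
  /*
   * Example (illustrative sketch): how a bio-based driver typically brackets
   * an I/O with the two accounting helpers above.  @disk and @bio are
   * hypothetical.
   *
   *	unsigned long start = jiffies;
   *
   *	generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
   *			      &disk->part0);
   *	...				// perform the I/O
   *	generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
   */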
  static inline bool bio_remaining_done(struct bio *bio)
  {
  	/*
  	 * If we're not chaining, then ->__bi_remaining is always 1 and
  	 * we always end io on the first invocation.
  	 */
  	if (!bio_flagged(bio, BIO_CHAIN))
  		return true;
  
  	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
  	if (atomic_dec_and_test(&bio->__bi_remaining)) {
  		bio_clear_flag(bio, BIO_CHAIN);
  		return true;
  	}
  
  	return false;
  }
  /**
   * bio_endio - end I/O on a bio
   * @bio:	bio
   *
   * Description:
   *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
   *   way to end I/O on a bio. No one should call bi_end_io() directly on a
   *   bio unless they own it and thus know that it has an end_io function.
   *
   *   bio_endio() can be called several times on a bio that has been chained
   *   using bio_chain().  The ->bi_end_io() function will only be called the
   *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
   *   generated if BIO_TRACE_COMPLETION is set.
   **/
  void bio_endio(struct bio *bio)
  {
  again:
  	if (!bio_remaining_done(bio))
  		return;
  	if (!bio_integrity_endio(bio))
  		return;

  	if (bio->bi_disk)
  		rq_qos_done_bio(bio->bi_disk->queue, bio);
  	/*
  	 * Need to have a real endio function for chained bios, otherwise
  	 * various corner cases will break (like stacking block devices that
  	 * save/restore bi_end_io) - however, we want to avoid unbounded
  	 * recursion and blowing the stack. Tail call optimization would
  	 * handle this, but compiling with frame pointers also disables
  	 * gcc's sibling call optimization.
  	 */
  	if (bio->bi_end_io == bio_chain_endio) {
  		bio = __bio_chain_endio(bio);
  		goto again;
  	}

  	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
  		trace_block_bio_complete(bio->bi_disk->queue, bio,
  					 blk_status_to_errno(bio->bi_status));
  		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
  	}
  	blk_throtl_bio_endio(bio);
  	/* release cgroup info */
  	bio_uninit(bio);
  	if (bio->bi_end_io)
  		bio->bi_end_io(bio);
  }
  EXPORT_SYMBOL(bio_endio);
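
  /*
   * Example (illustrative sketch): a minimal ->bi_end_io handler for a bio
   * the driver allocated itself.  my_end_io and struct my_ctx are
   * hypothetical.
   *
   *	static void my_end_io(struct bio *bio)
   *	{
   *		struct my_ctx *ctx = bio->bi_private;
   *
   *		if (bio->bi_status)
   *			ctx->error = blk_status_to_errno(bio->bi_status);
   *		complete(&ctx->done);
   *		bio_put(bio);
   *	}
   */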

  /**
   * bio_split - split a bio
   * @bio:	bio to split
   * @sectors:	number of sectors to split from the front of @bio
   * @gfp:	gfp mask
   * @bs:		bio set to allocate from
   *
   * Allocates and returns a new bio which represents @sectors from the start of
   * @bio, and updates @bio to represent the remaining sectors.
   *
f3f5da624   Martin K. Petersen   block: Do a full ...
1801
   * Unless this is a discard request the newly allocated bio will point
dad775845   Bart Van Assche   block: Document t...
1802
1803
   * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
   * neither @bio nor @bs are freed before the split bio.
   */
  struct bio *bio_split(struct bio *bio, int sectors,
  		      gfp_t gfp, struct bio_set *bs)
  {
  	struct bio *split;
  
  	BUG_ON(sectors <= 0);
  	BUG_ON(sectors >= bio_sectors(bio));
  	split = bio_clone_fast(bio, gfp, bs);
  	if (!split)
  		return NULL;
  
  	split->bi_iter.bi_size = sectors << 9;
  
  	if (bio_integrity(split))
  		bio_integrity_trim(split);
  
  	bio_advance(bio, split->bi_iter.bi_size);
  	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
  		bio_set_flag(split, BIO_TRACE_COMPLETION);

  	return split;
  }
  EXPORT_SYMBOL(bio_split);
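
  /*
   * Example (illustrative sketch): splitting a bio that is larger than some
   * per-device limit and chaining the two halves, similar to what stacking
   * drivers do.  @max_sectors is hypothetical, and a driver would normally
   * use its own bio_set rather than fs_bio_set.
   *
   *	if (bio_sectors(bio) > max_sectors) {
   *		struct bio *split = bio_split(bio, max_sectors,
   *					      GFP_NOIO, &fs_bio_set);
   *
   *		bio_chain(split, bio);	// @bio completes only after @split
   *		submit_bio(bio);	// send the remainder on its way
   *		bio = split;		// keep working on the front piece
   *	}
   *	submit_bio(bio);
   */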
  /**
   * bio_trim - trim a bio
   * @bio:	bio to trim
   * @offset:	number of sectors to trim from the front of @bio
   * @size:	size we want to trim @bio to, in sectors
   */
  void bio_trim(struct bio *bio, int offset, int size)
  {
  	/* 'bio' is a cloned bio which we need to trim to match
  	 * the given offset and size.
  	 */
  
  	size <<= 9;
  	if (offset == 0 && size == bio->bi_iter.bi_size)
  		return;
  	bio_advance(bio, offset << 9);
  	bio->bi_iter.bi_size = size;
  
  	if (bio_integrity(bio))
  		bio_integrity_trim(bio);

  }
  EXPORT_SYMBOL_GPL(bio_trim);
  /*
   * create memory pools for biovec's in a bio_set.
   * use the global biovec slabs created for general use.
   */
  int biovec_init_pool(mempool_t *pool, int pool_entries)
  {
  	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

  	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
  }
  /*
   * bioset_exit - exit a bioset initialized with bioset_init()
   *
   * May be called on a zeroed but uninitialized bioset (i.e. allocated with
   * kzalloc()).
   */
  void bioset_exit(struct bio_set *bs)
  {
  	if (bs->rescue_workqueue)
  		destroy_workqueue(bs->rescue_workqueue);
  	bs->rescue_workqueue = NULL;

  	mempool_exit(&bs->bio_pool);
  	mempool_exit(&bs->bvec_pool);

  	bioset_integrity_free(bs);
  	if (bs->bio_slab)
  		bio_put_slab(bs);
  	bs->bio_slab = NULL;
  }
  EXPORT_SYMBOL(bioset_exit);

  /**
   * bioset_init - Initialize a bio_set
   * @bs:		pool to initialize
   * @pool_size:	Number of bio and bio_vecs to cache in the mempool
   * @front_pad:	Number of bytes to allocate in front of the returned bio
   * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
   *              and %BIOSET_NEED_RESCUER
   *
   * Description:
   *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
   *    to ask for a number of bytes to be allocated in front of the bio.
   *    Front pad allocation is useful for embedding the bio inside
   *    another structure, to avoid allocating extra data to go with the bio.
   *    Note that the bio must be embedded at the END of that structure always,
   *    or things will break badly.
   *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
   *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
   *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
   *    dispatch queued requests when the mempool runs out of space.
   *
   */
  int bioset_init(struct bio_set *bs,
  		unsigned int pool_size,
  		unsigned int front_pad,
  		int flags)
  {
  	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
  
  	bs->front_pad = front_pad;
  
  	spin_lock_init(&bs->rescue_lock);
  	bio_list_init(&bs->rescue_list);
  	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
  
  	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
  	if (!bs->bio_slab)
  		return -ENOMEM;
  
  	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
  		goto bad;
  
  	if ((flags & BIOSET_NEED_BVECS) &&
  	    biovec_init_pool(&bs->bvec_pool, pool_size))
  		goto bad;
  
  	if (!(flags & BIOSET_NEED_RESCUER))
  		return 0;
  
  	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
  	if (!bs->rescue_workqueue)
  		goto bad;
  
  	return 0;
  bad:
  	bioset_exit(bs);
  	return -ENOMEM;
  }
  EXPORT_SYMBOL(bioset_init);
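
  /*
   * Example (illustrative sketch): using front_pad to embed the bio at the
   * end of a per-I/O structure, as the description above suggests.
   * struct my_io and my_bio_set are hypothetical.
   *
   *	struct my_io {
   *		struct completion done;
   *		struct bio bio;		// must be the last member
   *	};
   *	static struct bio_set my_bio_set;
   *
   *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
   *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
   *
   *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
   *	io  = container_of(bio, struct my_io, bio);
   */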
  /*
   * Initialize and setup a new bio_set, based on the settings from
   * another bio_set.
   */
  int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
  {
  	int flags;
  
  	flags = 0;
  	if (src->bvec_pool.min_nr)
  		flags |= BIOSET_NEED_BVECS;
  	if (src->rescue_workqueue)
  		flags |= BIOSET_NEED_RESCUER;
  
  	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
  }
  EXPORT_SYMBOL(bioset_init_from_src);
  #ifdef CONFIG_BLK_CGROUP

  /**
   * bio_disassociate_blkg - puts back the blkg reference if associated
   * @bio: target bio
   *
   * Helper to disassociate the blkg from @bio if a blkg is associated.
   */
  void bio_disassociate_blkg(struct bio *bio)
  {
  	if (bio->bi_blkg) {
  		blkg_put(bio->bi_blkg);
  		bio->bi_blkg = NULL;
  	}
  }
  EXPORT_SYMBOL_GPL(bio_disassociate_blkg);

  /**
   * __bio_associate_blkg - associate a bio with a blkg
   * @bio: target bio
   * @blkg: the blkg to associate
   *
   * This tries to associate @bio with the specified @blkg.  Association failure
   * is handled by walking up the blkg tree.  Therefore, the blkg associated can
   * be anything between @blkg and the root_blkg.  This situation only happens
   * when a cgroup is dying and then the remaining bios will spill to the closest
   * alive blkg.
   *
   * A reference will be taken on the @blkg and will be released when @bio is
   * freed.
   */
  static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
  {
  	bio_disassociate_blkg(bio);
  	bio->bi_blkg = blkg_tryget_closest(blkg);
  }
  
  /**
   * bio_associate_blkg_from_css - associate a bio with a specified css
   * @bio: target bio
   * @css: target css
   *
   * Associate @bio with the blkg found by combining the css's blkg and the
   * request_queue of the @bio.  This falls back to the queue's root_blkg if
   * the association fails with the css.
   */
  void bio_associate_blkg_from_css(struct bio *bio,
  				 struct cgroup_subsys_state *css)
  {
  	struct request_queue *q = bio->bi_disk->queue;
  	struct blkcg_gq *blkg;
  
  	rcu_read_lock();
  
  	if (!css || !css->parent)
  		blkg = q->root_blkg;
  	else
  		blkg = blkg_lookup_create(css_to_blkcg(css), q);
  
  	__bio_associate_blkg(bio, blkg);
  
  	rcu_read_unlock();
  }
  EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);

  #ifdef CONFIG_MEMCG
  /**
   * bio_associate_blkg_from_page - associate a bio with the page's blkg
   * @bio: target bio
   * @page: the page to lookup the blkcg from
   *
   * Associate @bio with the blkg from @page's owning memcg and the respective
   * request_queue.  If cgroup_e_css returns %NULL, fall back to the queue's
   * root_blkg.
   */
  void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
  {
  	struct cgroup_subsys_state *css;
  	if (!page->mem_cgroup)
  		return;
  	rcu_read_lock();
  
  	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
  	bio_associate_blkg_from_css(bio, css);
  
  	rcu_read_unlock();
  }
  #endif /* CONFIG_MEMCG */
  /**
   * bio_associate_blkg - associate a bio with a blkg
   * @bio: target bio
   *
   * Associate @bio with the blkg found from the bio's css and request_queue.
   * If one is not found, bio_lookup_blkg() creates the blkg.  If a blkg is
   * already associated, the css is reused and association redone as the
   * request_queue may have changed.
   */
  void bio_associate_blkg(struct bio *bio)
  {
  	struct cgroup_subsys_state *css;
  
  	rcu_read_lock();
  	if (bio->bi_blkg)
  		css = &bio_blkcg(bio)->css;
  	else
  		css = blkcg_css();

  	bio_associate_blkg_from_css(bio, css);
  
  	rcu_read_unlock();
  }
  EXPORT_SYMBOL_GPL(bio_associate_blkg);

  /**
   * bio_clone_blkg_association - clone blkg association from src to dst bio
   * @dst: destination bio
   * @src: source bio
   */
  void bio_clone_blkg_association(struct bio *dst, struct bio *src)
  {
  	rcu_read_lock();
  	if (src->bi_blkg)
  		__bio_associate_blkg(dst, src->bi_blkg);
  
  	rcu_read_unlock();
  }
  EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
  #endif /* CONFIG_BLK_CGROUP */
  static void __init biovec_init_slabs(void)
  {
  	int i;
  	for (i = 0; i < BVEC_POOL_NR; i++) {
  		int size;
  		struct biovec_slab *bvs = bvec_slabs + i;
  		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
  			bvs->slab = NULL;
  			continue;
  		}

  		size = bvs->nr_vecs * sizeof(struct bio_vec);
  		bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
  	}
  }
  
  static int __init init_bio(void)
  {
  	bio_slab_max = 2;
  	bio_slab_nr = 0;
  	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
  			    GFP_KERNEL);
  
  	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
  	if (!bio_slabs)
  		panic("bio: can't allocate bios\n");

  	bio_integrity_init();
  	biovec_init_slabs();
  	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
  		panic("bio: can't allocate bios\n");
  	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
  		panic("bio: can't create integrity pool\n");
  	return 0;
  }
  subsys_initcall(init_bio);