// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>
#include <trace/events/block.h>
#include "blk.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool, isa_page_pool;

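/*
 * Annotation (not in the original source): page_pool backs bouncing of
 * pages that sit above a queue's bounce_pfn, while isa_page_pool backs
 * bouncing into ISA-DMA-able (GFP_DMA) memory. bounce_bio_set supplies
 * the bounce clones and bounce_bio_split the front halves of oversized
 * bios. init_bounce_bioset() below is shared by both pool initializers;
 * the static bounce_bs_setup flag makes the bio_set setup run only once.
 */
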
  static void init_bounce_bioset(void)
  {
  	static bool bounce_bs_setup;
  	int ret;
  
  	if (bounce_bs_setup)
  		return;
  
  	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
  	BUG_ON(ret);
  	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
  		BUG_ON(1);
  
  	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
  	BUG_ON(ret);
  	bounce_bs_setup = true;
  }

#if defined(CONFIG_HIGHMEM)
  static __init int init_emergency_pool(void)
  {
	int ret;

#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
	BUG_ON(ret);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	init_bounce_bioset();
	return 0;
}

__initcall(init_emergency_pool);
  #endif

  #ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned char *vto;

	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
  }
  
  #else /* CONFIG_HIGHMEM */
  
  #define bounce_copy_vec(to, vfrom)	\
  	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
  
  #endif /* CONFIG_HIGHMEM */
  
  /*
   * allocate pages in the DMA region for the ISA pool
   */
  static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
  {
  	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
  }
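
/*
 * Annotation (not in the original source): adding GFP_DMA steers these
 * allocations into ZONE_DMA, which on x86 is the memory below the 16MB
 * ISA DMA boundary, so every page in isa_page_pool is reachable by ISA
 * devices.
 */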

static DEFINE_MUTEX(isa_mutex);

/*
 * Gets called "every" time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	int ret;

	mutex_lock(&isa_mutex);

	if (mempool_initialized(&isa_page_pool)) {
		mutex_unlock(&isa_mutex);
		return 0;
	}

	ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
			   mempool_free_pages, (void *) 0);
	BUG_ON(ret);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);

	init_bounce_bioset();
	mutex_unlock(&isa_mutex);

	return 0;
}
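
/*
 * Illustrative sketch (not part of this file): a driver whose device can
 * only DMA below the ISA boundary would declare that limit at queue init
 * time, e.g.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 * which calls init_emergency_isa_pool() and sets GFP_DMA in q->bounce_gfp,
 * after which blk_queue_bounce() below selects isa_page_pool.
 */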
  
  /*
   * Simple bounce buffer support for highmem pages. Depending on the
   * queue gfp mask set, *to may or may not be a highmem page. kmap it
   * always, it will do the Right Thing
   */
  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
  {
  	unsigned char *vfrom;
	struct bio_vec tovec, fromvec;
	struct bvec_iter iter;
	/*
	 * The bio @from was created by the bounce code, so we can iterate
	 * over its bvecs from start to end, but @from->bi_iter can't be
	 * trusted because it might have been changed by splitting.
	 */
	struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;

	bio_for_each_segment(tovec, to, iter) {
		fromvec = bio_iter_iovec(from, from_iter);
		if (tovec.bv_page != fromvec.bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the
			 * original copy; bounce_copy_vec() already uses
			 * tovec->bv_len.
			 */
			vfrom = page_address(fromvec.bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}
		bio_advance_iter(from, &from_iter, tovec.bv_len);
	}
}
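
/*
 * Annotation (not in the original source): this copy-back runs at read
 * completion. The device transferred data into the low bounce pages, so
 * the payload still has to be copied up into the original (possibly
 * highmem) pages the caller supplied. Writes need no equivalent here,
 * since __blk_queue_bounce() copies the payload down before submission.
 */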

static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, orig_vec;
	struct bvec_iter orig_iter = bio_orig->bi_iter;
	struct bvec_iter_all iter_all;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, iter_all) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}
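
/*
 * Annotation (not in the original source): the loop above walks the bounce
 * bio and the original bio in lockstep; only segments whose page differs
 * from the original one were taken from the mempool, so only those are
 * returned to it before completion is propagated to bio_orig.
 */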

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, &page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, &isa_page_pool);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, &page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, &isa_page_pool);
}
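
/*
 * Annotation (not in the original source): the four wrappers above exist
 * only to bind the right mempool to the shared completion paths; which one
 * becomes bi_end_io is decided in __blk_queue_bounce() from the pool in
 * use and the data direction.
 */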
  static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
  		struct bio_set *bs)
  {
  	struct bvec_iter iter;
  	struct bio_vec bv;
  	struct bio *bio;
  
  	/*
  	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
  	 * bio_src->bi_io_vec to bio->bi_io_vec.
  	 *
  	 * We can't do that anymore, because:
  	 *
  	 *  - The point of cloning the biovec is to produce a bio with a biovec
  	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
  	 *
  	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
  	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
  	 *    But the clone should succeed as long as the number of biovecs we
  	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
  	 *
  	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
  	 *    that does not own the bio - reason being drivers don't use it for
  	 *    iterating over the biovec anymore, so expecting it to be kept up
  	 *    to date (i.e. for clones that share the parent biovec) is just
  	 *    asking for trouble and would force extra work on
  	 *    __bio_clone_fast() anyways.
  	 */
  
  	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
  	if (!bio)
  		return NULL;
  	bio->bi_disk		= bio_src->bi_disk;
  	bio->bi_opf		= bio_src->bi_opf;
  	bio->bi_ioprio		= bio_src->bi_ioprio;
  	bio->bi_write_hint	= bio_src->bi_write_hint;
  	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
  	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
  
  	switch (bio_op(bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  	case REQ_OP_WRITE_ZEROES:
  		break;
  	case REQ_OP_WRITE_SAME:
  		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
  		break;
  	default:
  		bio_for_each_segment(bv, bio_src, iter)
  			bio->bi_io_vec[bio->bi_vcnt++] = bv;
  		break;
  	}
  
  	if (bio_integrity(bio_src)) {
  		int ret;
  
  		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
  		if (ret < 0) {
  			bio_put(bio);
  			return NULL;
  		}
  	}
	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

  	return bio;
  }
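
/*
 * Annotation (not in the original source): the bounce code clones the bio
 * rather than editing it in place because it must be free to swap
 * individual bv_page pointers for mempool pages; the clone owns its bvec
 * table, while the caller's bio and biovec stay untouched and are
 * completed via bi_private from the bounce endio handlers.
 */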

static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;
	bool passthrough = bio_is_passthrough(*bio_orig);

	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_PAGES)
			sectors += from.bv_len >> 9;
		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
			bounce = true;
	}
	if (!bounce)
		return;

  	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
		bio_chain(bio, *bio_orig);
		generic_make_request(*bio_orig);
		*bio_orig = bio;
	}

  	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
  			&bounce_bio_set);

	/*
	 * The bvec table can't be updated by bio_for_each_segment_all(),
	 * so retrieve the bvecs from the table directly. This is safe
	 * because 'bio' here only contains single-page bvecs.
	 */
  	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= q->limits.bounce_pfn)
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

  	if (pool == &page_pool) {
  		bio->bi_end_io = bounce_end_io_write;
  		if (rw == READ)
  			bio->bi_end_io = bounce_end_io_read;
  	} else {
  		bio->bi_end_io = bounce_end_io_write_isa;
  		if (rw == READ)
  			bio->bi_end_io = bounce_end_io_read_isa;
  	}
  
  	bio->bi_private = *bio_orig;
  	*bio_orig = bio;
  }
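
/*
 * Annotation (not in the original source): in summary, __blk_queue_bounce()
 * (1) scans the bio for pages above q->limits.bounce_pfn, (2) unless it is
 * a passthrough bio, splits off the first BIO_MAX_PAGES-worth of segments
 * when the bio is larger (so the clone's biovec allocation stays within
 * bounds) and resubmits the remainder, (3) clones the bio, (4) replaces
 * each offending page with a mempool page, copying the payload down for
 * writes, and (5) redirects completion through the bounce endio handlers,
 * parking the original bio in bi_private.
 */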

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
  {
  	mempool_t *pool;
  
  	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * For the non-ISA bounce case, just check whether the bounce pfn is
	 * equal to or bigger than the highest pfn in the system -- in that
	 * case, don't waste time iterating over the bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->limits.bounce_pfn >= blk_max_pfn)
			return;
		pool = &page_pool;
	} else {
		BUG_ON(!mempool_initialized(&isa_page_pool));
		pool = &isa_page_pool;
	}
  	/*
  	 * slow path
  	 */
  	__blk_queue_bounce(q, bio_orig, pool);
  }
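
/*
 * Usage sketch (illustrative, not part of this file): bouncing is not
 * requested by drivers directly. A driver states how far its device can
 * DMA at queue setup time, e.g.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *
 * and the request submission path then calls blk_queue_bounce(q, &bio);
 * if no page in the bio lies above bounce_pfn, the function returns
 * without touching it.
 */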