Blame view
block/bounce.c
9.09 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
831058dec [PATCH] BLOCK: Se... |
2 3 4 5 |
/* bounce buffer handling for block devices * * - Split from highmem.c */ |
b1de0d139 mm: convert some ... |
6 |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
831058dec [PATCH] BLOCK: Se... |
7 |
#include <linux/mm.h> |
b95f1b31b mm: Map most file... |
8 |
#include <linux/export.h> |
831058dec [PATCH] BLOCK: Se... |
9 |
#include <linux/swap.h> |
5a0e3ad6a include cleanup: ... |
10 |
#include <linux/gfp.h> |
831058dec [PATCH] BLOCK: Se... |
11 12 13 14 |
#include <linux/bio.h> #include <linux/pagemap.h> #include <linux/mempool.h> #include <linux/blkdev.h> |
66114cad6 writeback: separa... |
15 |
#include <linux/backing-dev.h> |
831058dec [PATCH] BLOCK: Se... |
16 17 18 |
#include <linux/init.h> #include <linux/hash.h> #include <linux/highmem.h> |
3bcfeaf93 block: initialize... |
19 |
#include <linux/bootmem.h> |
b1de0d139 mm: convert some ... |
20 |
#include <linux/printk.h> |
831058dec [PATCH] BLOCK: Se... |
21 |
#include <asm/tlbflush.h> |
55782138e tracing/events: c... |
22 |
#include <trace/events/block.h> |
3bce016a4 block: move bounc... |
23 |
#include "blk.h" |
55782138e tracing/events: c... |
24 |
|
831058dec [PATCH] BLOCK: Se... |
25 26 |
/* Pool sizes, in pages, for the emergency (highmem) and ISA bounce pools. */
#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

/* bio sets used for cloning and splitting bios that need bouncing */
static struct bio_set bounce_bio_set, bounce_bio_split;
/* page pools backing the bounce buffers; isa_page_pool allocates GFP_DMA */
static mempool_t page_pool, isa_page_pool;

/*
 * Lazily set up the bounce bio sets.  Called from both the highmem pool
 * initcall and the ISA pool setup path, so it guards against running twice.
 * Failure at this point is fatal (BUG_ON) -- bouncing cannot work without
 * the bio sets.
 */
static void init_bounce_bioset(void)
{
	static bool bounce_bs_setup;	/* true once the bio sets exist */
	int ret;

	if (bounce_bs_setup)
		return;

	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(ret);
	/* integrity payload cloning support for the bounce bio set */
	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
	BUG_ON(ret);

	bounce_bs_setup = true;
}
a687a5337 treewide: simplif... |
47 |
#if defined(CONFIG_HIGHMEM)
/*
 * Boot-time setup (via __initcall) of the emergency page pool used to
 * bounce highmem pages, plus the shared bounce bio sets.
 *
 * Returns 0 always; allocation failure is fatal (BUG_ON) since the
 * block layer cannot operate without the pool on highmem systems.
 */
static __init int init_emergency_pool(void)
{
	int ret;

#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	/* no highmem pages present (and none can appear later): skip */
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
	BUG_ON(ret);
	/* fix: format string was missing the trailing newline, which makes
	 * this printk run together with the next kernel log line */
	pr_info("pool size: %d pages\n", POOL_SIZE);

	init_bounce_bioset();
	return 0;
}

__initcall(init_emergency_pool);
#endif
831058dec [PATCH] BLOCK: Se... |
67 |
|
f10062578 bounce: allow use... |
68 |
#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned char *vto;

	/* destination page may live in highmem: map it atomically */
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
}

#else /* CONFIG_HIGHMEM */

/* lowmem pages are permanently mapped -- copy directly, no kmap needed */
#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
cf8d0973c block: setup boun... |
94 |
static DEFINE_MUTEX(isa_mutex); |
831058dec [PATCH] BLOCK: Se... |
95 96 97 98 99 100 |
/* * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA * as the max address, so check if the pool has already been created. */ int init_emergency_isa_pool(void) { |
338aa96d5 block: convert bo... |
101 |
int ret; |
cf8d0973c block: setup boun... |
102 103 104 105 |
mutex_lock(&isa_mutex); if (mempool_initialized(&isa_page_pool)) { mutex_unlock(&isa_mutex); |
831058dec [PATCH] BLOCK: Se... |
106 |
return 0; |
cf8d0973c block: setup boun... |
107 |
} |
831058dec [PATCH] BLOCK: Se... |
108 |
|
338aa96d5 block: convert bo... |
109 110 111 |
ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa, mempool_free_pages, (void *) 0); BUG_ON(ret); |
831058dec [PATCH] BLOCK: Se... |
112 |
|
b1de0d139 mm: convert some ... |
113 114 |
pr_info("isa pool size: %d pages ", ISA_POOL_SIZE); |
cf8d0973c block: setup boun... |
115 116 |
init_bounce_bioset(); mutex_unlock(&isa_mutex); |
831058dec [PATCH] BLOCK: Se... |
117 118 119 120 121 122 123 124 125 126 127 |
return 0; } /* * Simple bounce buffer support for highmem pages. Depending on the * queue gfp mask set, *to may or may not be a highmem page. kmap it * always, it will do the Right Thing */ static void copy_to_high_bio_irq(struct bio *to, struct bio *from) { unsigned char *vfrom; |
3c892a098 block: bounce: do... |
128 |
struct bio_vec tovec, fromvec; |
7988613b0 block: Convert bi... |
129 |
struct bvec_iter iter; |
3c892a098 block: bounce: do... |
130 131 132 133 134 135 |
/* * The bio of @from is created by bounce, so we can iterate * its bvec from start to end, but the @from->bi_iter can't be * trusted because it might be changed by splitting. */ struct bvec_iter from_iter = BVEC_ITER_ALL_INIT; |
7988613b0 block: Convert bi... |
136 137 |
bio_for_each_segment(tovec, to, iter) { |
3c892a098 block: bounce: do... |
138 139 |
fromvec = bio_iter_iovec(from, from_iter); if (tovec.bv_page != fromvec.bv_page) { |
7988613b0 block: Convert bi... |
140 141 142 143 144 |
/* * fromvec->bv_offset and fromvec->bv_len might have * been modified by the block layer, so use the original * copy, bounce_copy_vec already uses tovec->bv_len */ |
3c892a098 block: bounce: do... |
145 |
vfrom = page_address(fromvec.bv_page) + |
7988613b0 block: Convert bi... |
146 147 148 149 150 |
tovec.bv_offset; bounce_copy_vec(&tovec, vfrom); flush_dcache_page(tovec.bv_page); } |
3c892a098 block: bounce: do... |
151 |
bio_advance_iter(from, &from_iter, tovec.bv_len); |
831058dec [PATCH] BLOCK: Se... |
152 153 |
} } |
4246a0b63 block: add a bi_e... |
154 |
/*
 * Common completion for a bounced bio: return every bounce page to
 * @pool, propagate the completion status to the original bio, complete
 * it, and drop the bounce bio's reference.
 */
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;	/* set by __blk_queue_bounce */
	struct bio_vec *bvec, orig_vec;
	int i;
	/* walk the original bio's vecs in lockstep with the bounce bio's */
	struct bvec_iter orig_iter = bio_orig->bi_iter;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		/* pages that differ from the original were bounce pages */
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

/* write completion for the normal (highmem) bounce pool */
static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, &page_pool);
}

/* write completion for the ISA (GFP_DMA) bounce pool */
static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, &isa_page_pool);
}

/*
 * Read completion: on success, copy the data read into the bounce pages
 * back to the (possibly highmem) pages of the original bio, then do the
 * common completion/cleanup.
 */
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

/* read completion for the normal (highmem) bounce pool */
static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, &page_pool);
}

/* read completion for the ISA (GFP_DMA) bounce pool */
static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, &isa_page_pool);
}
c55183c9a block: unexport b... |
200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 |
/*
 * Clone @bio_src for bouncing: allocate a new bio from @bs, copy over
 * the identifying fields and (for data-carrying ops) the biovec, so the
 * bounce path can substitute pages without touching the original.
 * Returns NULL if bio allocation or integrity cloning fails.
 */
static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
		struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		/* these ops carry no data pages -- nothing to copy */
		break;
	case REQ_OP_WRITE_SAME:
		/* WRITE_SAME has a single payload vec, repeated by the device */
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
165125e1e [BLOCK] Get rid o... |
267 |
/*
 * Bounce the unreachable pages of *bio_orig: clone the bio, replace every
 * page above the queue's bounce_pfn with a page allocated from @pool
 * (copying the payload in for writes), and substitute the bounced clone
 * for the original.  The original bio is completed from the clone's
 * endio handler.  May split *bio_orig first if it is too large to clone.
 */
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0;
	bool bounce = false;
	int sectors = 0;	/* sectors in the first BIO_MAX_PAGES segments */
	bool passthrough = bio_is_passthrough(*bio_orig);

	/* single pass: size the possible split and detect whether any page
	 * actually needs bouncing */
	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ < BIO_MAX_PAGES)
			sectors += from.bv_len >> 9;
		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
			bounce = true;
	}
	if (!bounce)
		return;

	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
		/* too many segments to clone in one bio: split, chain the
		 * remainder behind the front piece and resubmit it */
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
		bio_chain(bio, *bio_orig);
		generic_make_request(*bio_orig);
		*bio_orig = bio;
	}
	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
			&bounce_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		/* pages the device can already reach stay in place */
		if (page_to_pfn(page) <= q->limits.bounce_pfn)
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			/* copy the payload into the bounce page */
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	/* pick the endio handler matching the pool and data direction */
	if (pool == &page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
165125e1e [BLOCK] Get rid o... |
331 |
/*
 * blk_queue_bounce - bounce *bio_orig if the queue requires it.
 *
 * Public entry point: selects the normal or the ISA (GFP_DMA) page pool
 * based on the queue's bounce_gfp mask, short-circuits when bouncing can
 * never be needed, and otherwise hands off to __blk_queue_bounce().
 */
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->limits.bounce_pfn >= blk_max_pfn)
			return;
		pool = &page_pool;
	} else {
		/* driver must have called init_emergency_isa_pool() first */
		BUG_ON(!mempool_initialized(&isa_page_pool));
		pool = &isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}