block/bounce.c

  // SPDX-License-Identifier: GPL-2.0
  /* bounce buffer handling for block devices
   *
   * - Split from highmem.c
   */
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  #include <linux/mm.h>
  #include <linux/export.h>
  #include <linux/swap.h>
  #include <linux/gfp.h>
  #include <linux/bio.h>
  #include <linux/pagemap.h>
  #include <linux/mempool.h>
  #include <linux/blkdev.h>
  #include <linux/backing-dev.h>
  #include <linux/init.h>
  #include <linux/hash.h>
  #include <linux/highmem.h>
  #include <linux/bootmem.h>
  #include <linux/printk.h>
  #include <asm/tlbflush.h>
  #include <trace/events/block.h>
  #include "blk.h"

  #define POOL_SIZE	64
  #define ISA_POOL_SIZE	16
  static struct bio_set *bounce_bio_set, *bounce_bio_split;
  static mempool_t *page_pool, *isa_page_pool;
  #if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
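/*
 * Boot-time setup of the emergency page pool used for bouncing, plus the
 * bio_sets used to clone and, when a bio is too large, split bounced bios.
 */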
  static __init int init_emergency_pool(void)
  {
  #if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
  	if (max_pfn <= max_low_pfn)
  		return 0;
  #endif
  
  	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
  	BUG_ON(!page_pool);
  	pr_info("pool size: %d pages
  ", POOL_SIZE);

  	bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
  	BUG_ON(!bounce_bio_set);
  	if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
  		BUG_ON(1);
  
  	bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
  	BUG_ON(!bounce_bio_split);
  	return 0;
  }
  
  __initcall(init_emergency_pool);
  #endif

  #ifdef CONFIG_HIGHMEM
/*
 * highmem version: kmap the destination bvec's page and copy into it
 */
  static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
  {
  	unsigned long flags;
  	unsigned char *vto;
  
  	local_irq_save(flags);
  	vto = kmap_atomic(to->bv_page);
  	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
  	kunmap_atomic(vto);
  	local_irq_restore(flags);
  }
  
  #else /* CONFIG_HIGHMEM */
  
  #define bounce_copy_vec(to, vfrom)	\
  	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
  
  #endif /* CONFIG_HIGHMEM */
  
  /*
   * allocate pages in the DMA region for the ISA pool
   */
  static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
  {
  	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
  }
  
  /*
 * gets called "every" time someone inits a queue with BLK_BOUNCE_ISA
   * as the max address, so check if the pool has already been created.
   */
  int init_emergency_isa_pool(void)
  {
  	if (isa_page_pool)
  		return 0;
  
  	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
  				       mempool_free_pages, (void *) 0);
  	BUG_ON(!isa_page_pool);
  	pr_info("isa pool size: %d pages
  ", ISA_POOL_SIZE);
  	return 0;
  }
  
  /*
   * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always; it will do the Right Thing.
   */
  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
  {
  	unsigned char *vfrom;
  	struct bio_vec tovec, *fromvec = from->bi_io_vec;
  	struct bvec_iter iter;
  
  	bio_for_each_segment(tovec, to, iter) {
  		if (tovec.bv_page != fromvec->bv_page) {
  			/*
  			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the original
			 * copy; bounce_copy_vec already uses tovec->bv_len.
  			 */
  			vfrom = page_address(fromvec->bv_page) +
  				tovec.bv_offset;
  
  			bounce_copy_vec(&tovec, vfrom);
  			flush_dcache_page(tovec.bv_page);
  		}

  		fromvec++;
  	}
  }
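
/*
 * Common completion path for a bounced bio: give every page that came
 * from the mempool back, propagate the completion status to the
 * original bio, end it, and drop the clone.
 */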
  static void bounce_end_io(struct bio *bio, mempool_t *pool)
  {
  	struct bio *bio_orig = bio->bi_private;
  	struct bio_vec *bvec, *org_vec;
  	int i;
  	int start = bio_orig->bi_iter.bi_idx;

  	/*
  	 * free up bounce indirect pages used
  	 */
  	bio_for_each_segment_all(bvec, bio, i) {
  		org_vec = bio_orig->bi_io_vec + i + start;
  		if (bvec->bv_page == org_vec->bv_page)
  			continue;
  
  		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
  		mempool_free(bvec->bv_page, pool);
  	}

	bio_orig->bi_status = bio->bi_status;
  	bio_endio(bio_orig);
  	bio_put(bio);
  }
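
/*
 * Write completions need no copy back into the original pages; the
 * bounce pages just get released.
 */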
  static void bounce_end_io_write(struct bio *bio)
  {
  	bounce_end_io(bio, page_pool);
  }

static void bounce_end_io_write_isa(struct bio *bio)
  {
  	bounce_end_io(bio, isa_page_pool);
  }
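
/*
 * Read completions copy the data from the bounce pages back into the
 * original (possibly highmem) pages before finishing the bio.
 */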
  static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
  {
  	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
  		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
  }

static void bounce_end_io_read(struct bio *bio)
  {
  	__bounce_end_io_read(bio, page_pool);
  }

static void bounce_end_io_read_isa(struct bio *bio)
  {
  	__bounce_end_io_read(bio, isa_page_pool);
  }
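
/*
 * Slow path: split the bio if it is too large to bounce in one piece,
 * clone it, swap every page above the queue's bounce_pfn for a mempool
 * page (copying the data over for writes), and point completion at the
 * bounce_end_io_* handlers, which copy back and clean up.
 */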
  static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
  			       mempool_t *pool)
  {
  	struct bio *bio;
  	int rw = bio_data_dir(*bio_orig);
  	struct bio_vec *to, from;
  	struct bvec_iter iter;
  	unsigned i = 0;
  	bool bounce = false;
  	int sectors = 0;
  	bool passthrough = bio_is_passthrough(*bio_orig);

  	bio_for_each_segment(from, *bio_orig, iter) {
  		if (i++ < BIO_MAX_PAGES)
  			sectors += from.bv_len >> 9;
  		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
  			bounce = true;
  	}
  	if (!bounce)
  		return;
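
	/*
	 * The clone below can cover at most BIO_MAX_PAGES segments, so if
	 * this bio is larger, split off the front piece we can handle,
	 * submit the remainder on its own, and bounce only the front.
	 * Passthrough bios are never split.
	 */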
  	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
  		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
  		bio_chain(bio, *bio_orig);
  		generic_make_request(*bio_orig);
  		*bio_orig = bio;
  	}

	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
  			bounce_bio_set);
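
	/*
	 * Swap every page above the bounce limit for one from the mempool;
	 * for a write, the payload is copied into the bounce page here.
	 */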
  	bio_for_each_segment_all(to, bio, i) {
  		struct page *page = to->bv_page;

  		if (page_to_pfn(page) <= q->limits.bounce_pfn)
  			continue;

  		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
  		inc_zone_page_state(to->bv_page, NR_BOUNCE);
  
  		if (rw == WRITE) {
  			char *vto, *vfrom;

			flush_dcache_page(page);
  			vto = page_address(to->bv_page) + to->bv_offset;
  			vfrom = kmap_atomic(page) + to->bv_offset;
  			memcpy(vto, vfrom, to->bv_len);
  			kunmap_atomic(vfrom);
  		}
  	}

	trace_block_bio_bounce(q, *bio_orig);

  	bio->bi_flags |= (1 << BIO_BOUNCED);
  
  	if (pool == page_pool) {
  		bio->bi_end_io = bounce_end_io_write;
  		if (rw == READ)
  			bio->bi_end_io = bounce_end_io_read;
  	} else {
  		bio->bi_end_io = bounce_end_io_write_isa;
  		if (rw == READ)
  			bio->bi_end_io = bounce_end_io_read_isa;
  	}
  
  	bio->bi_private = *bio_orig;
  	*bio_orig = bio;
  }
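
/*
 * blk_queue_bounce - bounce all pages a queue cannot address directly
 * @q:		the request queue the bio will be sent to
 * @bio_orig:	in/out: may be replaced by a clone whose offending pages
 *		have been substituted with bounce pages
 */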
  void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
  {
  	mempool_t *pool;
  
  	/*
  	 * Data-less bio, nothing to bounce
  	 */
  	if (!bio_has_data(*bio_orig))
  		return;
  
  	/*
  	 * for non-isa bounce case, just check if the bounce pfn is equal
  	 * to or bigger than the highest pfn in the system -- in that case,
  	 * don't waste time iterating over bio segments
  	 */
  	if (!(q->bounce_gfp & GFP_DMA)) {
  		if (q->limits.bounce_pfn >= blk_max_pfn)
  			return;
  		pool = page_pool;
  	} else {
  		BUG_ON(!isa_page_pool);
  		pool = isa_page_pool;
  	}

	/*
	 * slow path
	 */
  	__blk_queue_bounce(q, bio_orig, pool);
  }