block/bounce.c

/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
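/*
 * Boot-time setup of the emergency page pool: when highmem pages exist
 * (or may be added later via memory hotplug), bouncing must be able to
 * make forward progress even if regular page allocation fails.
 */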
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: map the destination bvec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
  
/*
 * Gets called every time a queue is initialized with BLK_BOUNCE_ISA as
 * the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}
  
/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always; it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, *fromvec = from->bi_io_vec;
	struct bvec_iter iter;

	bio_for_each_segment(tovec, to, iter) {
		if (tovec.bv_page != fromvec->bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the original
			 * copy; bounce_copy_vec already uses tovec->bv_len.
			 */
			vfrom = page_address(fromvec->bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}

		fromvec++;
	}
}
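
/*
 * Common completion for bounced bios: return any bounce pages to their
 * mempool, propagate the completion status, and end the original bio.
 */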
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;
	int start = bio_orig->bi_iter.bi_idx;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i + start;

		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_orig->bi_error = bio->bi_error;
	bio_endio(bio_orig);
	bio_put(bio);
}
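
/*
 * Write completion: the device has already consumed the bounced data,
 * so nothing needs copying back; just release the bounce pages.
 */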
static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, isa_page_pool);
}
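
/*
 * Read completion: if the read succeeded, first copy the data from the
 * bounce pages back into the original (possibly highmem) pages.
 */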
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_error)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, isa_page_pool);
}
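
/*
 * Core of the bounce path: clone the bio and replace every page that
 * lies above the queue's bounce limit with one from the bounce pool.
 */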
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i;

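	/*
	 * Quick scan: only bounce if some segment's page actually lies
	 * above the queue's bounce limit; otherwise leave the bio alone.
	 */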
	bio_for_each_segment(from, *bio_orig, iter)
		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
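	/*
	 * At least one segment needs bouncing: clone the bio so its bvec
	 * pages can be swapped for bounce pages without touching the
	 * caller's bio.
	 */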
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);
  
		if (rw == WRITE) {
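			/*
			 * The device will read from the bounce page, so the
			 * write payload must be copied in before submission;
			 * reads are copied back at completion time instead.
			 */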
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

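	/*
	 * Mark the clone as bounced, then pick a completion handler to
	 * match the pool the pages came from and the data direction;
	 * reads additionally copy the data back when they complete.
	 */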
	bio->bi_flags |= (1 << BIO_BOUNCED);
  
	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
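
/*
 * blk_queue_bounce - bounce all highmem/ISA-unreachable pages in a bio
 *
 * Decides whether *bio_orig needs bouncing for queue @q and, if so,
 * replaces it with a bounce clone; the original bio is then completed
 * from the clone's end_io handler once the clone finishes.
 */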
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * for the non-ISA bounce case, just check whether the bounce pfn
	 * is greater than or equal to the highest pfn in the system -- in
	 * that case, don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);