mm/bounce.c
  /* bounce buffer handling for block devices
   *
   * - Split from highmem.c
   */
  
  #include <linux/mm.h>
  #include <linux/export.h>
  #include <linux/swap.h>
  #include <linux/gfp.h>
  #include <linux/bio.h>
  #include <linux/pagemap.h>
  #include <linux/mempool.h>
  #include <linux/blkdev.h>
  #include <linux/init.h>
  #include <linux/hash.h>
  #include <linux/highmem.h>
  #include <linux/bootmem.h>
  #include <asm/tlbflush.h>

  #include <trace/events/block.h>
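
  /*
   * Sizes (in pages) of the two emergency pools: one for bouncing
   * highmem pages, one for ISA devices that can only DMA below 16MB.
   */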
  #define POOL_SIZE	64
  #define ISA_POOL_SIZE	16
  
  static mempool_t *page_pool, *isa_page_pool;
  #if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
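  /*
   * Set up the emergency page pool from which bounce pages are
   * allocated at I/O time.
   */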
  static __init int init_emergency_pool(void)
  {
  #if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
  	if (max_pfn <= max_low_pfn)
  		return 0;
  #endif
  
  	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
  	BUG_ON(!page_pool);
  	printk("bounce pool size: %d pages\n", POOL_SIZE);
  
  	return 0;
  }
  
  __initcall(init_emergency_pool);
  #endif

  #ifdef CONFIG_HIGHMEM
  /*
   * highmem version, map in to vec
   */
  static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
  {
  	unsigned long flags;
  	unsigned char *vto;
  
  	local_irq_save(flags);
  	vto = kmap_atomic(to->bv_page);
  	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
  	kunmap_atomic(vto);
  	local_irq_restore(flags);
  }
  
  #else /* CONFIG_HIGHMEM */
  
  #define bounce_copy_vec(to, vfrom)	\
  	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
  
  #endif /* CONFIG_HIGHMEM */
  
  /*
   * allocate pages in the DMA region for the ISA pool
   */
  static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
  {
  	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
  }
  
  /*
   * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
   * as the max address, so check if the pool has already been created.
   */
  int init_emergency_isa_pool(void)
  {
  	if (isa_page_pool)
  		return 0;
  
  	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
  				       mempool_free_pages, (void *) 0);
  	BUG_ON(!isa_page_pool);
  
  	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
  	return 0;
  }
  
  /*
   * Simple bounce buffer support for highmem pages. Depending on the
   * queue gfp mask set, *to may or may not be a highmem page. kmap it
   * always, it will do the Right Thing
   */
  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
  {
  	unsigned char *vfrom;
  	struct bio_vec tovec, *fromvec = from->bi_io_vec;
  	struct bvec_iter iter;
  
  	bio_for_each_segment(tovec, to, iter) {
  		if (tovec.bv_page != fromvec->bv_page) {
  			/*
  			 * fromvec->bv_offset and fromvec->bv_len might have
  			 * been modified by the block layer, so use the original
  			 * copy, bounce_copy_vec already uses tovec->bv_len
  			 */
  			vfrom = page_address(fromvec->bv_page) +
  				tovec.bv_offset;
  
  			bounce_copy_vec(&tovec, vfrom);
  			flush_dcache_page(tovec.bv_page);
  		}

  		fromvec++;
  	}
  }
  
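  /*
   * Completion for a bounced bio: return the bounce pages to their
   * pool, complete the original bio and drop the bounce clone.
   */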
  static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
  {
  	struct bio *bio_orig = bio->bi_private;
  	struct bio_vec *bvec, *org_vec;
  	int i;
  
  	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
  		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
  
  	/*
  	 * free up bounce indirect pages used
  	 */
  	bio_for_each_segment_all(bvec, bio, i) {
  		org_vec = bio_orig->bi_io_vec + i;
  		if (bvec->bv_page == org_vec->bv_page)
  			continue;
  
  		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
  		mempool_free(bvec->bv_page, pool);
  	}
  	bio_endio(bio_orig, err);
  	bio_put(bio);
  }

  static void bounce_end_io_write(struct bio *bio, int err)
  {
  	bounce_end_io(bio, page_pool, err);
  }

  static void bounce_end_io_write_isa(struct bio *bio, int err)
  {
  
  	bounce_end_io(bio, isa_page_pool, err);
  }
  
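  /*
   * Read completion: copy the bounced data back into the original
   * (possibly highmem) pages before releasing the bounce pages.
   */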
  static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
  {
  	struct bio *bio_orig = bio->bi_private;
  
  	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
  		copy_to_high_bio_irq(bio_orig, bio);
  
  	bounce_end_io(bio, pool, err);
  }
  static void bounce_end_io_read(struct bio *bio, int err)
  {
  	__bounce_end_io_read(bio, page_pool, err);
  }

  static void bounce_end_io_read_isa(struct bio *bio, int err)
  {
  	__bounce_end_io_read(bio, isa_page_pool, err);
  }

  #ifdef CONFIG_NEED_BOUNCE_POOL
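  /*
   * Writes to a device that requires stable pages must not touch the
   * page while it is under I/O; bounce such writes to a snapshot page
   * when the submitter flagged the bio BIO_SNAP_STABLE.
   */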
  static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
  {
  	if (bio_data_dir(bio) != WRITE)
  		return 0;
  
  	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
  		return 0;

  	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
  }
  #else
  static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
  {
  	return 0;
  }
  #endif /* CONFIG_NEED_BOUNCE_POOL */

  static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
  			       mempool_t *pool, int force)
  {
  	struct bio *bio;
  	int rw = bio_data_dir(*bio_orig);
  	struct bio_vec *to, from;
  	struct bvec_iter iter;
  	unsigned i;

  	if (force)
  		goto bounce;
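  	/*
  	 * Fast path: only clone and bounce if at least one segment lies
  	 * above the queue's bounce limit.
  	 */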
  	bio_for_each_segment(from, *bio_orig, iter)
  		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
  			goto bounce;

  	return;
  bounce:
  	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

  	bio_for_each_segment_all(to, bio, i) {
  		struct page *page = to->bv_page;

  		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
  			continue;

  		inc_zone_page_state(to->bv_page, NR_BOUNCE);
  		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
  
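  		/*
  		 * For a write, fill the bounce page now; for a read, the
  		 * data is copied back at completion time.
  		 */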
  		if (rw == WRITE) {
  			char *vto, *vfrom;

  			flush_dcache_page(page);

  			vto = page_address(to->bv_page) + to->bv_offset;
  			vfrom = kmap_atomic(page) + to->bv_offset;
  			memcpy(vto, vfrom, to->bv_len);
  			kunmap_atomic(vfrom);
  		}
  	}

  	trace_block_bio_bounce(q, *bio_orig);

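  	/*
  	 * Mark the clone as bounced and pick the end_io handler that
  	 * matches the pool the pages came from.
  	 */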
  	bio->bi_flags |= (1 << BIO_BOUNCED);
  
  	if (pool == page_pool) {
  		bio->bi_end_io = bounce_end_io_write;
  		if (rw == READ)
  			bio->bi_end_io = bounce_end_io_read;
  	} else {
  		bio->bi_end_io = bounce_end_io_write_isa;
  		if (rw == READ)
  			bio->bi_end_io = bounce_end_io_read_isa;
  	}
  
  	bio->bi_private = *bio_orig;
  	*bio_orig = bio;
  }

  void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
  {
  	int must_bounce;
  	mempool_t *pool;
  
  	/*
  	 * Data-less bio, nothing to bounce
  	 */
  	if (!bio_has_data(*bio_orig))
  		return;

  	must_bounce = must_snapshot_stable_pages(q, *bio_orig);

  	/*
  	 * for non-isa bounce case, just check if the bounce pfn is equal
  	 * to or bigger than the highest pfn in the system -- in that case,
  	 * don't waste time iterating over bio segments
  	 */
  	if (!(q->bounce_gfp & GFP_DMA)) {
  		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
  			return;
  		pool = page_pool;
  	} else {
  		BUG_ON(!isa_page_pool);
  		pool = isa_page_pool;
  	}

  	/*
  	 * slow path
  	 */
  	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
  }
  
  EXPORT_SYMBOL(blk_queue_bounce);