Commit bf02c082bf7a464518d45b9c178b8aa83f74dd5d
Committed by: Linus Torvalds
Parent: 49a6cbe1cd
Exists in master and in 7 other branches
[PATCH] fs/bio.c: tweaks
- Calculate a variable in bvec_alloc_bs() only once needed, not earlier
  (bio.o down from 18408 to 18376 Bytes, 32 Bytes saved, probably due to
  data locality improvements).

- Init variable idx to silence a gcc warning which already existed in the
  unmodified original base file (bvec_alloc_bs() handles idx correctly,
  so there's no need for the warning):

    fs/bio.c: In function `bio_alloc_bioset':
    fs/bio.c:169: warning: `idx' may be used uninitialized in this function

Signed-off-by: Andreas Mohr <andi@lisas.de>
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 5 additions and 4 deletions
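For orientation before the full listing, here is a condensed sketch of what the two tweaks amount to, simplified from the diff below (the switch that maps nr to a pool index and the rest of bio_alloc_bioset() are elided):

    /*
     * bvec_alloc_bs(): the bvec_slabs lookup is now done only after
     * mempool_alloc() has actually returned something, instead of being
     * computed up front on every call.
     */
    bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
    if (bvl) {
            struct biovec_slab *bp = bvec_slabs + *idx;

            memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
    }
    return bvl;

    /*
     * bio_alloc_bioset(): idx is passed to bvec_alloc_bs() by pointer and
     * is always set on the success path, but gcc cannot see that, so it is
     * pre-initialized purely to silence the warning quoted above.
     */
    unsigned long idx = 0;      /* shut up gcc */

    bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);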
fs/bio.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk> | 2 | * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | * This program is distributed in the hope that it will be useful, | 8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
12 | * | 12 | * |
13 | * You should have received a copy of the GNU General Public Licens | 13 | * You should have received a copy of the GNU General Public Licens |
14 | * along with this program; if not, write to the Free Software | 14 | * along with this program; if not, write to the Free Software |
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- | 15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- |
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/swap.h> | 19 | #include <linux/swap.h> |
20 | #include <linux/bio.h> | 20 | #include <linux/bio.h> |
21 | #include <linux/blkdev.h> | 21 | #include <linux/blkdev.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/mempool.h> | 26 | #include <linux/mempool.h> |
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/blktrace_api.h> | 28 | #include <linux/blktrace_api.h> |
29 | #include <scsi/sg.h> /* for struct sg_iovec */ | 29 | #include <scsi/sg.h> /* for struct sg_iovec */ |
30 | 30 | ||
31 | #define BIO_POOL_SIZE 256 | 31 | #define BIO_POOL_SIZE 256 |
32 | 32 | ||
33 | static kmem_cache_t *bio_slab __read_mostly; | 33 | static kmem_cache_t *bio_slab __read_mostly; |
34 | 34 | ||
35 | #define BIOVEC_NR_POOLS 6 | 35 | #define BIOVEC_NR_POOLS 6 |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * a small number of entries is fine, not going to be performance critical. | 38 | * a small number of entries is fine, not going to be performance critical. |
39 | * basically we just need to survive | 39 | * basically we just need to survive |
40 | */ | 40 | */ |
41 | #define BIO_SPLIT_ENTRIES 8 | 41 | #define BIO_SPLIT_ENTRIES 8 |
42 | mempool_t *bio_split_pool __read_mostly; | 42 | mempool_t *bio_split_pool __read_mostly; |
43 | 43 | ||
44 | struct biovec_slab { | 44 | struct biovec_slab { |
45 | int nr_vecs; | 45 | int nr_vecs; |
46 | char *name; | 46 | char *name; |
47 | kmem_cache_t *slab; | 47 | kmem_cache_t *slab; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * if you change this list, also change bvec_alloc or things will | 51 | * if you change this list, also change bvec_alloc or things will |
52 | * break badly! cannot be bigger than what you can fit into an | 52 | * break badly! cannot be bigger than what you can fit into an |
53 | * unsigned short | 53 | * unsigned short |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } | 56 | #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } |
57 | static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { | 57 | static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { |
58 | BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), | 58 | BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), |
59 | }; | 59 | }; |
60 | #undef BV | 60 | #undef BV |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * bio_set is used to allow other portions of the IO system to | 63 | * bio_set is used to allow other portions of the IO system to |
64 | * allocate their own private memory pools for bio and iovec structures. | 64 | * allocate their own private memory pools for bio and iovec structures. |
65 | * These memory pools in turn all allocate from the bio_slab | 65 | * These memory pools in turn all allocate from the bio_slab |
66 | * and the bvec_slabs[]. | 66 | * and the bvec_slabs[]. |
67 | */ | 67 | */ |
68 | struct bio_set { | 68 | struct bio_set { |
69 | mempool_t *bio_pool; | 69 | mempool_t *bio_pool; |
70 | mempool_t *bvec_pools[BIOVEC_NR_POOLS]; | 70 | mempool_t *bvec_pools[BIOVEC_NR_POOLS]; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * fs_bio_set is the bio_set containing bio and iovec memory pools used by | 74 | * fs_bio_set is the bio_set containing bio and iovec memory pools used by |
75 | * IO code that does not need private memory pools. | 75 | * IO code that does not need private memory pools. |
76 | */ | 76 | */ |
77 | static struct bio_set *fs_bio_set; | 77 | static struct bio_set *fs_bio_set; |
78 | 78 | ||
79 | static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) | 79 | static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) |
80 | { | 80 | { |
81 | struct bio_vec *bvl; | 81 | struct bio_vec *bvl; |
82 | struct biovec_slab *bp; | ||
83 | 82 | ||
84 | /* | 83 | /* |
85 | * see comment near bvec_array define! | 84 | * see comment near bvec_array define! |
86 | */ | 85 | */ |
87 | switch (nr) { | 86 | switch (nr) { |
88 | case 1 : *idx = 0; break; | 87 | case 1 : *idx = 0; break; |
89 | case 2 ... 4: *idx = 1; break; | 88 | case 2 ... 4: *idx = 1; break; |
90 | case 5 ... 16: *idx = 2; break; | 89 | case 5 ... 16: *idx = 2; break; |
91 | case 17 ... 64: *idx = 3; break; | 90 | case 17 ... 64: *idx = 3; break; |
92 | case 65 ... 128: *idx = 4; break; | 91 | case 65 ... 128: *idx = 4; break; |
93 | case 129 ... BIO_MAX_PAGES: *idx = 5; break; | 92 | case 129 ... BIO_MAX_PAGES: *idx = 5; break; |
94 | default: | 93 | default: |
95 | return NULL; | 94 | return NULL; |
96 | } | 95 | } |
97 | /* | 96 | /* |
98 | * idx now points to the pool we want to allocate from | 97 | * idx now points to the pool we want to allocate from |
99 | */ | 98 | */ |
100 | 99 | ||
101 | bp = bvec_slabs + *idx; | ||
102 | bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask); | 100 | bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask); |
103 | if (bvl) | 101 | if (bvl) { |
102 | struct biovec_slab *bp = bvec_slabs + *idx; | ||
103 | |||
104 | memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec)); | 104 | memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec)); |
105 | } | ||
105 | 106 | ||
106 | return bvl; | 107 | return bvl; |
107 | } | 108 | } |
108 | 109 | ||
109 | void bio_free(struct bio *bio, struct bio_set *bio_set) | 110 | void bio_free(struct bio *bio, struct bio_set *bio_set) |
110 | { | 111 | { |
111 | const int pool_idx = BIO_POOL_IDX(bio); | 112 | const int pool_idx = BIO_POOL_IDX(bio); |
112 | 113 | ||
113 | BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS); | 114 | BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS); |
114 | 115 | ||
115 | mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]); | 116 | mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]); |
116 | mempool_free(bio, bio_set->bio_pool); | 117 | mempool_free(bio, bio_set->bio_pool); |
117 | } | 118 | } |
118 | 119 | ||
119 | /* | 120 | /* |
120 | * default destructor for a bio allocated with bio_alloc_bioset() | 121 | * default destructor for a bio allocated with bio_alloc_bioset() |
121 | */ | 122 | */ |
122 | static void bio_fs_destructor(struct bio *bio) | 123 | static void bio_fs_destructor(struct bio *bio) |
123 | { | 124 | { |
124 | bio_free(bio, fs_bio_set); | 125 | bio_free(bio, fs_bio_set); |
125 | } | 126 | } |
126 | 127 | ||
127 | void bio_init(struct bio *bio) | 128 | void bio_init(struct bio *bio) |
128 | { | 129 | { |
129 | bio->bi_next = NULL; | 130 | bio->bi_next = NULL; |
130 | bio->bi_bdev = NULL; | 131 | bio->bi_bdev = NULL; |
131 | bio->bi_flags = 1 << BIO_UPTODATE; | 132 | bio->bi_flags = 1 << BIO_UPTODATE; |
132 | bio->bi_rw = 0; | 133 | bio->bi_rw = 0; |
133 | bio->bi_vcnt = 0; | 134 | bio->bi_vcnt = 0; |
134 | bio->bi_idx = 0; | 135 | bio->bi_idx = 0; |
135 | bio->bi_phys_segments = 0; | 136 | bio->bi_phys_segments = 0; |
136 | bio->bi_hw_segments = 0; | 137 | bio->bi_hw_segments = 0; |
137 | bio->bi_hw_front_size = 0; | 138 | bio->bi_hw_front_size = 0; |
138 | bio->bi_hw_back_size = 0; | 139 | bio->bi_hw_back_size = 0; |
139 | bio->bi_size = 0; | 140 | bio->bi_size = 0; |
140 | bio->bi_max_vecs = 0; | 141 | bio->bi_max_vecs = 0; |
141 | bio->bi_end_io = NULL; | 142 | bio->bi_end_io = NULL; |
142 | atomic_set(&bio->bi_cnt, 1); | 143 | atomic_set(&bio->bi_cnt, 1); |
143 | bio->bi_private = NULL; | 144 | bio->bi_private = NULL; |
144 | } | 145 | } |
145 | 146 | ||
146 | /** | 147 | /** |
147 | * bio_alloc_bioset - allocate a bio for I/O | 148 | * bio_alloc_bioset - allocate a bio for I/O |
148 | * @gfp_mask: the GFP_ mask given to the slab allocator | 149 | * @gfp_mask: the GFP_ mask given to the slab allocator |
149 | * @nr_iovecs: number of iovecs to pre-allocate | 150 | * @nr_iovecs: number of iovecs to pre-allocate |
150 | * @bs: the bio_set to allocate from | 151 | * @bs: the bio_set to allocate from |
151 | * | 152 | * |
152 | * Description: | 153 | * Description: |
153 | * bio_alloc_bioset will first try it's on mempool to satisfy the allocation. | 154 | * bio_alloc_bioset will first try it's on mempool to satisfy the allocation. |
154 | * If %__GFP_WAIT is set then we will block on the internal pool waiting | 155 | * If %__GFP_WAIT is set then we will block on the internal pool waiting |
155 | * for a &struct bio to become free. | 156 | * for a &struct bio to become free. |
156 | * | 157 | * |
157 | * allocate bio and iovecs from the memory pools specified by the | 158 | * allocate bio and iovecs from the memory pools specified by the |
158 | * bio_set structure. | 159 | * bio_set structure. |
159 | **/ | 160 | **/ |
160 | struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) | 161 | struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) |
161 | { | 162 | { |
162 | struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); | 163 | struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); |
163 | 164 | ||
164 | if (likely(bio)) { | 165 | if (likely(bio)) { |
165 | struct bio_vec *bvl = NULL; | 166 | struct bio_vec *bvl = NULL; |
166 | 167 | ||
167 | bio_init(bio); | 168 | bio_init(bio); |
168 | if (likely(nr_iovecs)) { | 169 | if (likely(nr_iovecs)) { |
169 | unsigned long idx; | 170 | unsigned long idx = 0; /* shut up gcc */ |
170 | 171 | ||
171 | bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); | 172 | bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); |
172 | if (unlikely(!bvl)) { | 173 | if (unlikely(!bvl)) { |
173 | mempool_free(bio, bs->bio_pool); | 174 | mempool_free(bio, bs->bio_pool); |
174 | bio = NULL; | 175 | bio = NULL; |
175 | goto out; | 176 | goto out; |
176 | } | 177 | } |
177 | bio->bi_flags |= idx << BIO_POOL_OFFSET; | 178 | bio->bi_flags |= idx << BIO_POOL_OFFSET; |
178 | bio->bi_max_vecs = bvec_slabs[idx].nr_vecs; | 179 | bio->bi_max_vecs = bvec_slabs[idx].nr_vecs; |
179 | } | 180 | } |
180 | bio->bi_io_vec = bvl; | 181 | bio->bi_io_vec = bvl; |
181 | } | 182 | } |
182 | out: | 183 | out: |
183 | return bio; | 184 | return bio; |
184 | } | 185 | } |
185 | 186 | ||
186 | struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) | 187 | struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) |
187 | { | 188 | { |
188 | struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); | 189 | struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); |
189 | 190 | ||
190 | if (bio) | 191 | if (bio) |
191 | bio->bi_destructor = bio_fs_destructor; | 192 | bio->bi_destructor = bio_fs_destructor; |
192 | 193 | ||
193 | return bio; | 194 | return bio; |
194 | } | 195 | } |
195 | 196 | ||
196 | void zero_fill_bio(struct bio *bio) | 197 | void zero_fill_bio(struct bio *bio) |
197 | { | 198 | { |
198 | unsigned long flags; | 199 | unsigned long flags; |
199 | struct bio_vec *bv; | 200 | struct bio_vec *bv; |
200 | int i; | 201 | int i; |
201 | 202 | ||
202 | bio_for_each_segment(bv, bio, i) { | 203 | bio_for_each_segment(bv, bio, i) { |
203 | char *data = bvec_kmap_irq(bv, &flags); | 204 | char *data = bvec_kmap_irq(bv, &flags); |
204 | memset(data, 0, bv->bv_len); | 205 | memset(data, 0, bv->bv_len); |
205 | flush_dcache_page(bv->bv_page); | 206 | flush_dcache_page(bv->bv_page); |
206 | bvec_kunmap_irq(data, &flags); | 207 | bvec_kunmap_irq(data, &flags); |
207 | } | 208 | } |
208 | } | 209 | } |
209 | EXPORT_SYMBOL(zero_fill_bio); | 210 | EXPORT_SYMBOL(zero_fill_bio); |
210 | 211 | ||
211 | /** | 212 | /** |
212 | * bio_put - release a reference to a bio | 213 | * bio_put - release a reference to a bio |
213 | * @bio: bio to release reference to | 214 | * @bio: bio to release reference to |
214 | * | 215 | * |
215 | * Description: | 216 | * Description: |
216 | * Put a reference to a &struct bio, either one you have gotten with | 217 | * Put a reference to a &struct bio, either one you have gotten with |
217 | * bio_alloc or bio_get. The last put of a bio will free it. | 218 | * bio_alloc or bio_get. The last put of a bio will free it. |
218 | **/ | 219 | **/ |
219 | void bio_put(struct bio *bio) | 220 | void bio_put(struct bio *bio) |
220 | { | 221 | { |
221 | BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); | 222 | BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); |
222 | 223 | ||
223 | /* | 224 | /* |
224 | * last put frees it | 225 | * last put frees it |
225 | */ | 226 | */ |
226 | if (atomic_dec_and_test(&bio->bi_cnt)) { | 227 | if (atomic_dec_and_test(&bio->bi_cnt)) { |
227 | bio->bi_next = NULL; | 228 | bio->bi_next = NULL; |
228 | bio->bi_destructor(bio); | 229 | bio->bi_destructor(bio); |
229 | } | 230 | } |
230 | } | 231 | } |
231 | 232 | ||
232 | inline int bio_phys_segments(request_queue_t *q, struct bio *bio) | 233 | inline int bio_phys_segments(request_queue_t *q, struct bio *bio) |
233 | { | 234 | { |
234 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | 235 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) |
235 | blk_recount_segments(q, bio); | 236 | blk_recount_segments(q, bio); |
236 | 237 | ||
237 | return bio->bi_phys_segments; | 238 | return bio->bi_phys_segments; |
238 | } | 239 | } |
239 | 240 | ||
240 | inline int bio_hw_segments(request_queue_t *q, struct bio *bio) | 241 | inline int bio_hw_segments(request_queue_t *q, struct bio *bio) |
241 | { | 242 | { |
242 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) | 243 | if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) |
243 | blk_recount_segments(q, bio); | 244 | blk_recount_segments(q, bio); |
244 | 245 | ||
245 | return bio->bi_hw_segments; | 246 | return bio->bi_hw_segments; |
246 | } | 247 | } |
247 | 248 | ||
248 | /** | 249 | /** |
249 | * __bio_clone - clone a bio | 250 | * __bio_clone - clone a bio |
250 | * @bio: destination bio | 251 | * @bio: destination bio |
251 | * @bio_src: bio to clone | 252 | * @bio_src: bio to clone |
252 | * | 253 | * |
253 | * Clone a &bio. Caller will own the returned bio, but not | 254 | * Clone a &bio. Caller will own the returned bio, but not |
254 | * the actual data it points to. Reference count of returned | 255 | * the actual data it points to. Reference count of returned |
255 | * bio will be one. | 256 | * bio will be one. |
256 | */ | 257 | */ |
257 | void __bio_clone(struct bio *bio, struct bio *bio_src) | 258 | void __bio_clone(struct bio *bio, struct bio *bio_src) |
258 | { | 259 | { |
259 | request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); | 260 | request_queue_t *q = bdev_get_queue(bio_src->bi_bdev); |
260 | 261 | ||
261 | memcpy(bio->bi_io_vec, bio_src->bi_io_vec, | 262 | memcpy(bio->bi_io_vec, bio_src->bi_io_vec, |
262 | bio_src->bi_max_vecs * sizeof(struct bio_vec)); | 263 | bio_src->bi_max_vecs * sizeof(struct bio_vec)); |
263 | 264 | ||
264 | bio->bi_sector = bio_src->bi_sector; | 265 | bio->bi_sector = bio_src->bi_sector; |
265 | bio->bi_bdev = bio_src->bi_bdev; | 266 | bio->bi_bdev = bio_src->bi_bdev; |
266 | bio->bi_flags |= 1 << BIO_CLONED; | 267 | bio->bi_flags |= 1 << BIO_CLONED; |
267 | bio->bi_rw = bio_src->bi_rw; | 268 | bio->bi_rw = bio_src->bi_rw; |
268 | bio->bi_vcnt = bio_src->bi_vcnt; | 269 | bio->bi_vcnt = bio_src->bi_vcnt; |
269 | bio->bi_size = bio_src->bi_size; | 270 | bio->bi_size = bio_src->bi_size; |
270 | bio->bi_idx = bio_src->bi_idx; | 271 | bio->bi_idx = bio_src->bi_idx; |
271 | bio_phys_segments(q, bio); | 272 | bio_phys_segments(q, bio); |
272 | bio_hw_segments(q, bio); | 273 | bio_hw_segments(q, bio); |
273 | } | 274 | } |
274 | 275 | ||
275 | /** | 276 | /** |
276 | * bio_clone - clone a bio | 277 | * bio_clone - clone a bio |
277 | * @bio: bio to clone | 278 | * @bio: bio to clone |
278 | * @gfp_mask: allocation priority | 279 | * @gfp_mask: allocation priority |
279 | * | 280 | * |
280 | * Like __bio_clone, only also allocates the returned bio | 281 | * Like __bio_clone, only also allocates the returned bio |
281 | */ | 282 | */ |
282 | struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) | 283 | struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) |
283 | { | 284 | { |
284 | struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); | 285 | struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); |
285 | 286 | ||
286 | if (b) { | 287 | if (b) { |
287 | b->bi_destructor = bio_fs_destructor; | 288 | b->bi_destructor = bio_fs_destructor; |
288 | __bio_clone(b, bio); | 289 | __bio_clone(b, bio); |
289 | } | 290 | } |
290 | 291 | ||
291 | return b; | 292 | return b; |
292 | } | 293 | } |
293 | 294 | ||
294 | /** | 295 | /** |
295 | * bio_get_nr_vecs - return approx number of vecs | 296 | * bio_get_nr_vecs - return approx number of vecs |
296 | * @bdev: I/O target | 297 | * @bdev: I/O target |
297 | * | 298 | * |
298 | * Return the approximate number of pages we can send to this target. | 299 | * Return the approximate number of pages we can send to this target. |
299 | * There's no guarantee that you will be able to fit this number of pages | 300 | * There's no guarantee that you will be able to fit this number of pages |
300 | * into a bio, it does not account for dynamic restrictions that vary | 301 | * into a bio, it does not account for dynamic restrictions that vary |
301 | * on offset. | 302 | * on offset. |
302 | */ | 303 | */ |
303 | int bio_get_nr_vecs(struct block_device *bdev) | 304 | int bio_get_nr_vecs(struct block_device *bdev) |
304 | { | 305 | { |
305 | request_queue_t *q = bdev_get_queue(bdev); | 306 | request_queue_t *q = bdev_get_queue(bdev); |
306 | int nr_pages; | 307 | int nr_pages; |
307 | 308 | ||
308 | nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; | 309 | nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
309 | if (nr_pages > q->max_phys_segments) | 310 | if (nr_pages > q->max_phys_segments) |
310 | nr_pages = q->max_phys_segments; | 311 | nr_pages = q->max_phys_segments; |
311 | if (nr_pages > q->max_hw_segments) | 312 | if (nr_pages > q->max_hw_segments) |
312 | nr_pages = q->max_hw_segments; | 313 | nr_pages = q->max_hw_segments; |
313 | 314 | ||
314 | return nr_pages; | 315 | return nr_pages; |
315 | } | 316 | } |
316 | 317 | ||
317 | static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page | 318 | static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page |
318 | *page, unsigned int len, unsigned int offset, | 319 | *page, unsigned int len, unsigned int offset, |
319 | unsigned short max_sectors) | 320 | unsigned short max_sectors) |
320 | { | 321 | { |
321 | int retried_segments = 0; | 322 | int retried_segments = 0; |
322 | struct bio_vec *bvec; | 323 | struct bio_vec *bvec; |
323 | 324 | ||
324 | /* | 325 | /* |
325 | * cloned bio must not modify vec list | 326 | * cloned bio must not modify vec list |
326 | */ | 327 | */ |
327 | if (unlikely(bio_flagged(bio, BIO_CLONED))) | 328 | if (unlikely(bio_flagged(bio, BIO_CLONED))) |
328 | return 0; | 329 | return 0; |
329 | 330 | ||
330 | if (((bio->bi_size + len) >> 9) > max_sectors) | 331 | if (((bio->bi_size + len) >> 9) > max_sectors) |
331 | return 0; | 332 | return 0; |
332 | 333 | ||
333 | /* | 334 | /* |
334 | * For filesystems with a blocksize smaller than the pagesize | 335 | * For filesystems with a blocksize smaller than the pagesize |
335 | * we will often be called with the same page as last time and | 336 | * we will often be called with the same page as last time and |
336 | * a consecutive offset. Optimize this special case. | 337 | * a consecutive offset. Optimize this special case. |
337 | */ | 338 | */ |
338 | if (bio->bi_vcnt > 0) { | 339 | if (bio->bi_vcnt > 0) { |
339 | struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; | 340 | struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
340 | 341 | ||
341 | if (page == prev->bv_page && | 342 | if (page == prev->bv_page && |
342 | offset == prev->bv_offset + prev->bv_len) { | 343 | offset == prev->bv_offset + prev->bv_len) { |
343 | prev->bv_len += len; | 344 | prev->bv_len += len; |
344 | if (q->merge_bvec_fn && | 345 | if (q->merge_bvec_fn && |
345 | q->merge_bvec_fn(q, bio, prev) < len) { | 346 | q->merge_bvec_fn(q, bio, prev) < len) { |
346 | prev->bv_len -= len; | 347 | prev->bv_len -= len; |
347 | return 0; | 348 | return 0; |
348 | } | 349 | } |
349 | 350 | ||
350 | goto done; | 351 | goto done; |
351 | } | 352 | } |
352 | } | 353 | } |
353 | 354 | ||
354 | if (bio->bi_vcnt >= bio->bi_max_vecs) | 355 | if (bio->bi_vcnt >= bio->bi_max_vecs) |
355 | return 0; | 356 | return 0; |
356 | 357 | ||
357 | /* | 358 | /* |
358 | * we might lose a segment or two here, but rather that than | 359 | * we might lose a segment or two here, but rather that than |
359 | * make this too complex. | 360 | * make this too complex. |
360 | */ | 361 | */ |
361 | 362 | ||
362 | while (bio->bi_phys_segments >= q->max_phys_segments | 363 | while (bio->bi_phys_segments >= q->max_phys_segments |
363 | || bio->bi_hw_segments >= q->max_hw_segments | 364 | || bio->bi_hw_segments >= q->max_hw_segments |
364 | || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) { | 365 | || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) { |
365 | 366 | ||
366 | if (retried_segments) | 367 | if (retried_segments) |
367 | return 0; | 368 | return 0; |
368 | 369 | ||
369 | retried_segments = 1; | 370 | retried_segments = 1; |
370 | blk_recount_segments(q, bio); | 371 | blk_recount_segments(q, bio); |
371 | } | 372 | } |
372 | 373 | ||
373 | /* | 374 | /* |
374 | * setup the new entry, we might clear it again later if we | 375 | * setup the new entry, we might clear it again later if we |
375 | * cannot add the page | 376 | * cannot add the page |
376 | */ | 377 | */ |
377 | bvec = &bio->bi_io_vec[bio->bi_vcnt]; | 378 | bvec = &bio->bi_io_vec[bio->bi_vcnt]; |
378 | bvec->bv_page = page; | 379 | bvec->bv_page = page; |
379 | bvec->bv_len = len; | 380 | bvec->bv_len = len; |
380 | bvec->bv_offset = offset; | 381 | bvec->bv_offset = offset; |
381 | 382 | ||
382 | /* | 383 | /* |
383 | * if queue has other restrictions (eg varying max sector size | 384 | * if queue has other restrictions (eg varying max sector size |
384 | * depending on offset), it can specify a merge_bvec_fn in the | 385 | * depending on offset), it can specify a merge_bvec_fn in the |
385 | * queue to get further control | 386 | * queue to get further control |
386 | */ | 387 | */ |
387 | if (q->merge_bvec_fn) { | 388 | if (q->merge_bvec_fn) { |
388 | /* | 389 | /* |
389 | * merge_bvec_fn() returns number of bytes it can accept | 390 | * merge_bvec_fn() returns number of bytes it can accept |
390 | * at this offset | 391 | * at this offset |
391 | */ | 392 | */ |
392 | if (q->merge_bvec_fn(q, bio, bvec) < len) { | 393 | if (q->merge_bvec_fn(q, bio, bvec) < len) { |
393 | bvec->bv_page = NULL; | 394 | bvec->bv_page = NULL; |
394 | bvec->bv_len = 0; | 395 | bvec->bv_len = 0; |
395 | bvec->bv_offset = 0; | 396 | bvec->bv_offset = 0; |
396 | return 0; | 397 | return 0; |
397 | } | 398 | } |
398 | } | 399 | } |
399 | 400 | ||
400 | /* If we may be able to merge these biovecs, force a recount */ | 401 | /* If we may be able to merge these biovecs, force a recount */ |
401 | if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) || | 402 | if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) || |
402 | BIOVEC_VIRT_MERGEABLE(bvec-1, bvec))) | 403 | BIOVEC_VIRT_MERGEABLE(bvec-1, bvec))) |
403 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | 404 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); |
404 | 405 | ||
405 | bio->bi_vcnt++; | 406 | bio->bi_vcnt++; |
406 | bio->bi_phys_segments++; | 407 | bio->bi_phys_segments++; |
407 | bio->bi_hw_segments++; | 408 | bio->bi_hw_segments++; |
408 | done: | 409 | done: |
409 | bio->bi_size += len; | 410 | bio->bi_size += len; |
410 | return len; | 411 | return len; |
411 | } | 412 | } |
412 | 413 | ||
413 | /** | 414 | /** |
414 | * bio_add_pc_page - attempt to add page to bio | 415 | * bio_add_pc_page - attempt to add page to bio |
415 | * @q: the target queue | 416 | * @q: the target queue |
416 | * @bio: destination bio | 417 | * @bio: destination bio |
417 | * @page: page to add | 418 | * @page: page to add |
418 | * @len: vec entry length | 419 | * @len: vec entry length |
419 | * @offset: vec entry offset | 420 | * @offset: vec entry offset |
420 | * | 421 | * |
421 | * Attempt to add a page to the bio_vec maplist. This can fail for a | 422 | * Attempt to add a page to the bio_vec maplist. This can fail for a |
422 | * number of reasons, such as the bio being full or target block | 423 | * number of reasons, such as the bio being full or target block |
423 | * device limitations. The target block device must allow bio's | 424 | * device limitations. The target block device must allow bio's |
424 | * smaller than PAGE_SIZE, so it is always possible to add a single | 425 | * smaller than PAGE_SIZE, so it is always possible to add a single |
425 | * page to an empty bio. This should only be used by REQ_PC bios. | 426 | * page to an empty bio. This should only be used by REQ_PC bios. |
426 | */ | 427 | */ |
427 | int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, | 428 | int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page, |
428 | unsigned int len, unsigned int offset) | 429 | unsigned int len, unsigned int offset) |
429 | { | 430 | { |
430 | return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); | 431 | return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); |
431 | } | 432 | } |
432 | 433 | ||
433 | /** | 434 | /** |
434 | * bio_add_page - attempt to add page to bio | 435 | * bio_add_page - attempt to add page to bio |
435 | * @bio: destination bio | 436 | * @bio: destination bio |
436 | * @page: page to add | 437 | * @page: page to add |
437 | * @len: vec entry length | 438 | * @len: vec entry length |
438 | * @offset: vec entry offset | 439 | * @offset: vec entry offset |
439 | * | 440 | * |
440 | * Attempt to add a page to the bio_vec maplist. This can fail for a | 441 | * Attempt to add a page to the bio_vec maplist. This can fail for a |
441 | * number of reasons, such as the bio being full or target block | 442 | * number of reasons, such as the bio being full or target block |
442 | * device limitations. The target block device must allow bio's | 443 | * device limitations. The target block device must allow bio's |
443 | * smaller than PAGE_SIZE, so it is always possible to add a single | 444 | * smaller than PAGE_SIZE, so it is always possible to add a single |
444 | * page to an empty bio. | 445 | * page to an empty bio. |
445 | */ | 446 | */ |
446 | int bio_add_page(struct bio *bio, struct page *page, unsigned int len, | 447 | int bio_add_page(struct bio *bio, struct page *page, unsigned int len, |
447 | unsigned int offset) | 448 | unsigned int offset) |
448 | { | 449 | { |
449 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 450 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
450 | return __bio_add_page(q, bio, page, len, offset, q->max_sectors); | 451 | return __bio_add_page(q, bio, page, len, offset, q->max_sectors); |
451 | } | 452 | } |
452 | 453 | ||
453 | struct bio_map_data { | 454 | struct bio_map_data { |
454 | struct bio_vec *iovecs; | 455 | struct bio_vec *iovecs; |
455 | void __user *userptr; | 456 | void __user *userptr; |
456 | }; | 457 | }; |
457 | 458 | ||
458 | static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio) | 459 | static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio) |
459 | { | 460 | { |
460 | memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt); | 461 | memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt); |
461 | bio->bi_private = bmd; | 462 | bio->bi_private = bmd; |
462 | } | 463 | } |
463 | 464 | ||
464 | static void bio_free_map_data(struct bio_map_data *bmd) | 465 | static void bio_free_map_data(struct bio_map_data *bmd) |
465 | { | 466 | { |
466 | kfree(bmd->iovecs); | 467 | kfree(bmd->iovecs); |
467 | kfree(bmd); | 468 | kfree(bmd); |
468 | } | 469 | } |
469 | 470 | ||
470 | static struct bio_map_data *bio_alloc_map_data(int nr_segs) | 471 | static struct bio_map_data *bio_alloc_map_data(int nr_segs) |
471 | { | 472 | { |
472 | struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL); | 473 | struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL); |
473 | 474 | ||
474 | if (!bmd) | 475 | if (!bmd) |
475 | return NULL; | 476 | return NULL; |
476 | 477 | ||
477 | bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL); | 478 | bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL); |
478 | if (bmd->iovecs) | 479 | if (bmd->iovecs) |
479 | return bmd; | 480 | return bmd; |
480 | 481 | ||
481 | kfree(bmd); | 482 | kfree(bmd); |
482 | return NULL; | 483 | return NULL; |
483 | } | 484 | } |
484 | 485 | ||
485 | /** | 486 | /** |
486 | * bio_uncopy_user - finish previously mapped bio | 487 | * bio_uncopy_user - finish previously mapped bio |
487 | * @bio: bio being terminated | 488 | * @bio: bio being terminated |
488 | * | 489 | * |
489 | * Free pages allocated from bio_copy_user() and write back data | 490 | * Free pages allocated from bio_copy_user() and write back data |
490 | * to user space in case of a read. | 491 | * to user space in case of a read. |
491 | */ | 492 | */ |
492 | int bio_uncopy_user(struct bio *bio) | 493 | int bio_uncopy_user(struct bio *bio) |
493 | { | 494 | { |
494 | struct bio_map_data *bmd = bio->bi_private; | 495 | struct bio_map_data *bmd = bio->bi_private; |
495 | const int read = bio_data_dir(bio) == READ; | 496 | const int read = bio_data_dir(bio) == READ; |
496 | struct bio_vec *bvec; | 497 | struct bio_vec *bvec; |
497 | int i, ret = 0; | 498 | int i, ret = 0; |
498 | 499 | ||
499 | __bio_for_each_segment(bvec, bio, i, 0) { | 500 | __bio_for_each_segment(bvec, bio, i, 0) { |
500 | char *addr = page_address(bvec->bv_page); | 501 | char *addr = page_address(bvec->bv_page); |
501 | unsigned int len = bmd->iovecs[i].bv_len; | 502 | unsigned int len = bmd->iovecs[i].bv_len; |
502 | 503 | ||
503 | if (read && !ret && copy_to_user(bmd->userptr, addr, len)) | 504 | if (read && !ret && copy_to_user(bmd->userptr, addr, len)) |
504 | ret = -EFAULT; | 505 | ret = -EFAULT; |
505 | 506 | ||
506 | __free_page(bvec->bv_page); | 507 | __free_page(bvec->bv_page); |
507 | bmd->userptr += len; | 508 | bmd->userptr += len; |
508 | } | 509 | } |
509 | bio_free_map_data(bmd); | 510 | bio_free_map_data(bmd); |
510 | bio_put(bio); | 511 | bio_put(bio); |
511 | return ret; | 512 | return ret; |
512 | } | 513 | } |
513 | 514 | ||
514 | /** | 515 | /** |
515 | * bio_copy_user - copy user data to bio | 516 | * bio_copy_user - copy user data to bio |
516 | * @q: destination block queue | 517 | * @q: destination block queue |
517 | * @uaddr: start of user address | 518 | * @uaddr: start of user address |
518 | * @len: length in bytes | 519 | * @len: length in bytes |
519 | * @write_to_vm: bool indicating writing to pages or not | 520 | * @write_to_vm: bool indicating writing to pages or not |
520 | * | 521 | * |
521 | * Prepares and returns a bio for indirect user io, bouncing data | 522 | * Prepares and returns a bio for indirect user io, bouncing data |
522 | * to/from kernel pages as necessary. Must be paired with | 523 | * to/from kernel pages as necessary. Must be paired with |
523 | * call bio_uncopy_user() on io completion. | 524 | * call bio_uncopy_user() on io completion. |
524 | */ | 525 | */ |
525 | struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, | 526 | struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr, |
526 | unsigned int len, int write_to_vm) | 527 | unsigned int len, int write_to_vm) |
527 | { | 528 | { |
528 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 529 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
529 | unsigned long start = uaddr >> PAGE_SHIFT; | 530 | unsigned long start = uaddr >> PAGE_SHIFT; |
530 | struct bio_map_data *bmd; | 531 | struct bio_map_data *bmd; |
531 | struct bio_vec *bvec; | 532 | struct bio_vec *bvec; |
532 | struct page *page; | 533 | struct page *page; |
533 | struct bio *bio; | 534 | struct bio *bio; |
534 | int i, ret; | 535 | int i, ret; |
535 | 536 | ||
536 | bmd = bio_alloc_map_data(end - start); | 537 | bmd = bio_alloc_map_data(end - start); |
537 | if (!bmd) | 538 | if (!bmd) |
538 | return ERR_PTR(-ENOMEM); | 539 | return ERR_PTR(-ENOMEM); |
539 | 540 | ||
540 | bmd->userptr = (void __user *) uaddr; | 541 | bmd->userptr = (void __user *) uaddr; |
541 | 542 | ||
542 | ret = -ENOMEM; | 543 | ret = -ENOMEM; |
543 | bio = bio_alloc(GFP_KERNEL, end - start); | 544 | bio = bio_alloc(GFP_KERNEL, end - start); |
544 | if (!bio) | 545 | if (!bio) |
545 | goto out_bmd; | 546 | goto out_bmd; |
546 | 547 | ||
547 | bio->bi_rw |= (!write_to_vm << BIO_RW); | 548 | bio->bi_rw |= (!write_to_vm << BIO_RW); |
548 | 549 | ||
549 | ret = 0; | 550 | ret = 0; |
550 | while (len) { | 551 | while (len) { |
551 | unsigned int bytes = PAGE_SIZE; | 552 | unsigned int bytes = PAGE_SIZE; |
552 | 553 | ||
553 | if (bytes > len) | 554 | if (bytes > len) |
554 | bytes = len; | 555 | bytes = len; |
555 | 556 | ||
556 | page = alloc_page(q->bounce_gfp | GFP_KERNEL); | 557 | page = alloc_page(q->bounce_gfp | GFP_KERNEL); |
557 | if (!page) { | 558 | if (!page) { |
558 | ret = -ENOMEM; | 559 | ret = -ENOMEM; |
559 | break; | 560 | break; |
560 | } | 561 | } |
561 | 562 | ||
562 | if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { | 563 | if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { |
563 | ret = -EINVAL; | 564 | ret = -EINVAL; |
564 | break; | 565 | break; |
565 | } | 566 | } |
566 | 567 | ||
567 | len -= bytes; | 568 | len -= bytes; |
568 | } | 569 | } |
569 | 570 | ||
570 | if (ret) | 571 | if (ret) |
571 | goto cleanup; | 572 | goto cleanup; |
572 | 573 | ||
573 | /* | 574 | /* |
574 | * success | 575 | * success |
575 | */ | 576 | */ |
576 | if (!write_to_vm) { | 577 | if (!write_to_vm) { |
577 | char __user *p = (char __user *) uaddr; | 578 | char __user *p = (char __user *) uaddr; |
578 | 579 | ||
579 | /* | 580 | /* |
580 | * for a write, copy in data to kernel pages | 581 | * for a write, copy in data to kernel pages |
581 | */ | 582 | */ |
582 | ret = -EFAULT; | 583 | ret = -EFAULT; |
583 | bio_for_each_segment(bvec, bio, i) { | 584 | bio_for_each_segment(bvec, bio, i) { |
584 | char *addr = page_address(bvec->bv_page); | 585 | char *addr = page_address(bvec->bv_page); |
585 | 586 | ||
586 | if (copy_from_user(addr, p, bvec->bv_len)) | 587 | if (copy_from_user(addr, p, bvec->bv_len)) |
587 | goto cleanup; | 588 | goto cleanup; |
588 | p += bvec->bv_len; | 589 | p += bvec->bv_len; |
589 | } | 590 | } |
590 | } | 591 | } |
591 | 592 | ||
592 | bio_set_map_data(bmd, bio); | 593 | bio_set_map_data(bmd, bio); |
593 | return bio; | 594 | return bio; |
594 | cleanup: | 595 | cleanup: |
595 | bio_for_each_segment(bvec, bio, i) | 596 | bio_for_each_segment(bvec, bio, i) |
596 | __free_page(bvec->bv_page); | 597 | __free_page(bvec->bv_page); |
597 | 598 | ||
598 | bio_put(bio); | 599 | bio_put(bio); |
599 | out_bmd: | 600 | out_bmd: |
600 | bio_free_map_data(bmd); | 601 | bio_free_map_data(bmd); |
601 | return ERR_PTR(ret); | 602 | return ERR_PTR(ret); |
602 | } | 603 | } |
603 | 604 | ||
604 | static struct bio *__bio_map_user_iov(request_queue_t *q, | 605 | static struct bio *__bio_map_user_iov(request_queue_t *q, |
605 | struct block_device *bdev, | 606 | struct block_device *bdev, |
606 | struct sg_iovec *iov, int iov_count, | 607 | struct sg_iovec *iov, int iov_count, |
607 | int write_to_vm) | 608 | int write_to_vm) |
608 | { | 609 | { |
609 | int i, j; | 610 | int i, j; |
610 | int nr_pages = 0; | 611 | int nr_pages = 0; |
611 | struct page **pages; | 612 | struct page **pages; |
612 | struct bio *bio; | 613 | struct bio *bio; |
613 | int cur_page = 0; | 614 | int cur_page = 0; |
614 | int ret, offset; | 615 | int ret, offset; |
615 | 616 | ||
616 | for (i = 0; i < iov_count; i++) { | 617 | for (i = 0; i < iov_count; i++) { |
617 | unsigned long uaddr = (unsigned long)iov[i].iov_base; | 618 | unsigned long uaddr = (unsigned long)iov[i].iov_base; |
618 | unsigned long len = iov[i].iov_len; | 619 | unsigned long len = iov[i].iov_len; |
619 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 620 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
620 | unsigned long start = uaddr >> PAGE_SHIFT; | 621 | unsigned long start = uaddr >> PAGE_SHIFT; |
621 | 622 | ||
622 | nr_pages += end - start; | 623 | nr_pages += end - start; |
623 | /* | 624 | /* |
624 | * transfer and buffer must be aligned to at least hardsector | 625 | * transfer and buffer must be aligned to at least hardsector |
625 | * size for now, in the future we can relax this restriction | 626 | * size for now, in the future we can relax this restriction |
626 | */ | 627 | */ |
627 | if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q))) | 628 | if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q))) |
628 | return ERR_PTR(-EINVAL); | 629 | return ERR_PTR(-EINVAL); |
629 | } | 630 | } |
630 | 631 | ||
631 | if (!nr_pages) | 632 | if (!nr_pages) |
632 | return ERR_PTR(-EINVAL); | 633 | return ERR_PTR(-EINVAL); |
633 | 634 | ||
634 | bio = bio_alloc(GFP_KERNEL, nr_pages); | 635 | bio = bio_alloc(GFP_KERNEL, nr_pages); |
635 | if (!bio) | 636 | if (!bio) |
636 | return ERR_PTR(-ENOMEM); | 637 | return ERR_PTR(-ENOMEM); |
637 | 638 | ||
638 | ret = -ENOMEM; | 639 | ret = -ENOMEM; |
639 | pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); | 640 | pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); |
640 | if (!pages) | 641 | if (!pages) |
641 | goto out; | 642 | goto out; |
642 | 643 | ||
643 | for (i = 0; i < iov_count; i++) { | 644 | for (i = 0; i < iov_count; i++) { |
644 | unsigned long uaddr = (unsigned long)iov[i].iov_base; | 645 | unsigned long uaddr = (unsigned long)iov[i].iov_base; |
645 | unsigned long len = iov[i].iov_len; | 646 | unsigned long len = iov[i].iov_len; |
646 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 647 | unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
647 | unsigned long start = uaddr >> PAGE_SHIFT; | 648 | unsigned long start = uaddr >> PAGE_SHIFT; |
648 | const int local_nr_pages = end - start; | 649 | const int local_nr_pages = end - start; |
649 | const int page_limit = cur_page + local_nr_pages; | 650 | const int page_limit = cur_page + local_nr_pages; |
650 | 651 | ||
651 | down_read(¤t->mm->mmap_sem); | 652 | down_read(¤t->mm->mmap_sem); |
652 | ret = get_user_pages(current, current->mm, uaddr, | 653 | ret = get_user_pages(current, current->mm, uaddr, |
653 | local_nr_pages, | 654 | local_nr_pages, |
654 | write_to_vm, 0, &pages[cur_page], NULL); | 655 | write_to_vm, 0, &pages[cur_page], NULL); |
655 | up_read(¤t->mm->mmap_sem); | 656 | up_read(¤t->mm->mmap_sem); |
656 | 657 | ||
657 | if (ret < local_nr_pages) { | 658 | if (ret < local_nr_pages) { |
658 | ret = -EFAULT; | 659 | ret = -EFAULT; |
659 | goto out_unmap; | 660 | goto out_unmap; |
660 | } | 661 | } |
661 | 662 | ||
662 | offset = uaddr & ~PAGE_MASK; | 663 | offset = uaddr & ~PAGE_MASK; |
663 | for (j = cur_page; j < page_limit; j++) { | 664 | for (j = cur_page; j < page_limit; j++) { |
664 | unsigned int bytes = PAGE_SIZE - offset; | 665 | unsigned int bytes = PAGE_SIZE - offset; |
665 | 666 | ||
666 | if (len <= 0) | 667 | if (len <= 0) |
667 | break; | 668 | break; |
668 | 669 | ||
669 | if (bytes > len) | 670 | if (bytes > len) |
670 | bytes = len; | 671 | bytes = len; |
671 | 672 | ||
672 | /* | 673 | /* |
673 | * sorry... | 674 | * sorry... |
674 | */ | 675 | */ |
675 | if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < | 676 | if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < |
676 | bytes) | 677 | bytes) |
677 | break; | 678 | break; |
678 | 679 | ||
679 | len -= bytes; | 680 | len -= bytes; |
680 | offset = 0; | 681 | offset = 0; |
681 | } | 682 | } |
682 | 683 | ||
683 | cur_page = j; | 684 | cur_page = j; |
684 | /* | 685 | /* |
685 | * release the pages we didn't map into the bio, if any | 686 | * release the pages we didn't map into the bio, if any |
686 | */ | 687 | */ |
687 | while (j < page_limit) | 688 | while (j < page_limit) |
688 | page_cache_release(pages[j++]); | 689 | page_cache_release(pages[j++]); |
689 | } | 690 | } |
690 | 691 | ||
691 | kfree(pages); | 692 | kfree(pages); |
692 | 693 | ||
693 | /* | 694 | /* |
694 | * set data direction, and check if mapped pages need bouncing | 695 | * set data direction, and check if mapped pages need bouncing |
695 | */ | 696 | */ |
696 | if (!write_to_vm) | 697 | if (!write_to_vm) |
697 | bio->bi_rw |= (1 << BIO_RW); | 698 | bio->bi_rw |= (1 << BIO_RW); |
698 | 699 | ||
699 | bio->bi_bdev = bdev; | 700 | bio->bi_bdev = bdev; |
700 | bio->bi_flags |= (1 << BIO_USER_MAPPED); | 701 | bio->bi_flags |= (1 << BIO_USER_MAPPED); |
701 | return bio; | 702 | return bio; |
702 | 703 | ||
703 | out_unmap: | 704 | out_unmap: |
704 | for (i = 0; i < nr_pages; i++) { | 705 | for (i = 0; i < nr_pages; i++) { |
705 | if(!pages[i]) | 706 | if(!pages[i]) |
706 | break; | 707 | break; |
707 | page_cache_release(pages[i]); | 708 | page_cache_release(pages[i]); |
708 | } | 709 | } |
709 | out: | 710 | out: |
710 | kfree(pages); | 711 | kfree(pages); |
711 | bio_put(bio); | 712 | bio_put(bio); |
712 | return ERR_PTR(ret); | 713 | return ERR_PTR(ret); |
713 | } | 714 | } |
714 | 715 | ||
715 | /** | 716 | /** |
716 | * bio_map_user - map user address into bio | 717 | * bio_map_user - map user address into bio |
717 | * @q: the request_queue_t for the bio | 718 | * @q: the request_queue_t for the bio |
718 | * @bdev: destination block device | 719 | * @bdev: destination block device |
719 | * @uaddr: start of user address | 720 | * @uaddr: start of user address |
720 | * @len: length in bytes | 721 | * @len: length in bytes |
721 | * @write_to_vm: bool indicating writing to pages or not | 722 | * @write_to_vm: bool indicating writing to pages or not |
722 | * | 723 | * |
723 | * Map the user space address into a bio suitable for io to a block | 724 | * Map the user space address into a bio suitable for io to a block |
724 | * device. Returns an error pointer in case of error. | 725 | * device. Returns an error pointer in case of error. |
725 | */ | 726 | */ |
726 | struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, | 727 | struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev, |
727 | unsigned long uaddr, unsigned int len, int write_to_vm) | 728 | unsigned long uaddr, unsigned int len, int write_to_vm) |
728 | { | 729 | { |
729 | struct sg_iovec iov; | 730 | struct sg_iovec iov; |
730 | 731 | ||
731 | iov.iov_base = (void __user *)uaddr; | 732 | iov.iov_base = (void __user *)uaddr; |
732 | iov.iov_len = len; | 733 | iov.iov_len = len; |
733 | 734 | ||
734 | return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm); | 735 | return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm); |
735 | } | 736 | } |
736 | 737 | ||
737 | /** | 738 | /** |
738 | * bio_map_user_iov - map user sg_iovec table into bio | 739 | * bio_map_user_iov - map user sg_iovec table into bio |
739 | * @q: the request_queue_t for the bio | 740 | * @q: the request_queue_t for the bio |
740 | * @bdev: destination block device | 741 | * @bdev: destination block device |
741 | * @iov: the iovec. | 742 | * @iov: the iovec. |
742 | * @iov_count: number of elements in the iovec | 743 | * @iov_count: number of elements in the iovec |
743 | * @write_to_vm: bool indicating writing to pages or not | 744 | * @write_to_vm: bool indicating writing to pages or not |
744 | * | 745 | * |
745 | * Map the user space address into a bio suitable for io to a block | 746 | * Map the user space address into a bio suitable for io to a block |
746 | * device. Returns an error pointer in case of error. | 747 | * device. Returns an error pointer in case of error. |
747 | */ | 748 | */ |
748 | struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, | 749 | struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev, |
749 | struct sg_iovec *iov, int iov_count, | 750 | struct sg_iovec *iov, int iov_count, |
750 | int write_to_vm) | 751 | int write_to_vm) |
751 | { | 752 | { |
752 | struct bio *bio; | 753 | struct bio *bio; |
753 | int len = 0, i; | 754 | int len = 0, i; |
754 | 755 | ||
755 | bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm); | 756 | bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm); |
756 | 757 | ||
757 | if (IS_ERR(bio)) | 758 | if (IS_ERR(bio)) |
758 | return bio; | 759 | return bio; |
759 | 760 | ||
760 | /* | 761 | /* |
761 | * subtle -- if __bio_map_user() ended up bouncing a bio, | 762 | * subtle -- if __bio_map_user() ended up bouncing a bio, |
762 | * it would normally disappear when its bi_end_io is run. | 763 | * it would normally disappear when its bi_end_io is run. |
763 | * however, we need it for the unmap, so grab an extra | 764 | * however, we need it for the unmap, so grab an extra |
764 | * reference to it | 765 | * reference to it |
765 | */ | 766 | */ |
766 | bio_get(bio); | 767 | bio_get(bio); |
767 | 768 | ||
768 | for (i = 0; i < iov_count; i++) | 769 | for (i = 0; i < iov_count; i++) |
769 | len += iov[i].iov_len; | 770 | len += iov[i].iov_len; |
770 | 771 | ||
771 | if (bio->bi_size == len) | 772 | if (bio->bi_size == len) |
772 | return bio; | 773 | return bio; |
773 | 774 | ||
774 | /* | 775 | /* |
775 | * don't support partial mappings | 776 | * don't support partial mappings |
776 | */ | 777 | */ |
777 | bio_endio(bio, bio->bi_size, 0); | 778 | bio_endio(bio, bio->bi_size, 0); |
778 | bio_unmap_user(bio); | 779 | bio_unmap_user(bio); |
779 | return ERR_PTR(-EINVAL); | 780 | return ERR_PTR(-EINVAL); |
780 | } | 781 | } |
781 | 782 | ||
782 | static void __bio_unmap_user(struct bio *bio) | 783 | static void __bio_unmap_user(struct bio *bio) |
783 | { | 784 | { |
784 | struct bio_vec *bvec; | 785 | struct bio_vec *bvec; |
785 | int i; | 786 | int i; |
786 | 787 | ||
787 | /* | 788 | /* |
788 | * make sure we dirty pages we wrote to | 789 | * make sure we dirty pages we wrote to |
789 | */ | 790 | */ |
790 | __bio_for_each_segment(bvec, bio, i, 0) { | 791 | __bio_for_each_segment(bvec, bio, i, 0) { |
791 | if (bio_data_dir(bio) == READ) | 792 | if (bio_data_dir(bio) == READ) |
792 | set_page_dirty_lock(bvec->bv_page); | 793 | set_page_dirty_lock(bvec->bv_page); |
793 | 794 | ||
794 | page_cache_release(bvec->bv_page); | 795 | page_cache_release(bvec->bv_page); |
795 | } | 796 | } |
796 | 797 | ||
797 | bio_put(bio); | 798 | bio_put(bio); |
798 | } | 799 | } |
799 | 800 | ||
800 | /** | 801 | /** |
801 | * bio_unmap_user - unmap a bio | 802 | * bio_unmap_user - unmap a bio |
802 | * @bio: the bio being unmapped | 803 | * @bio: the bio being unmapped |
803 | * | 804 | * |
804 | * Unmap a bio previously mapped by bio_map_user(). Must be called with | 805 | * Unmap a bio previously mapped by bio_map_user(). Must be called with |
805 | * a process context. | 806 | * a process context. |
806 | * | 807 | * |
807 | * bio_unmap_user() may sleep. | 808 | * bio_unmap_user() may sleep. |
808 | */ | 809 | */ |
809 | void bio_unmap_user(struct bio *bio) | 810 | void bio_unmap_user(struct bio *bio) |
810 | { | 811 | { |
811 | __bio_unmap_user(bio); | 812 | __bio_unmap_user(bio); |
812 | bio_put(bio); | 813 | bio_put(bio); |
813 | } | 814 | } |
814 | 815 | ||
815 | static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err) | 816 | static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err) |
816 | { | 817 | { |
817 | if (bio->bi_size) | 818 | if (bio->bi_size) |
818 | return 1; | 819 | return 1; |
819 | 820 | ||
820 | bio_put(bio); | 821 | bio_put(bio); |
821 | return 0; | 822 | return 0; |
822 | } | 823 | } |
823 | 824 | ||
824 | 825 | ||
825 | static struct bio *__bio_map_kern(request_queue_t *q, void *data, | 826 | static struct bio *__bio_map_kern(request_queue_t *q, void *data, |
826 | unsigned int len, gfp_t gfp_mask) | 827 | unsigned int len, gfp_t gfp_mask) |
827 | { | 828 | { |
828 | unsigned long kaddr = (unsigned long)data; | 829 | unsigned long kaddr = (unsigned long)data; |
829 | unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 830 | unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
830 | unsigned long start = kaddr >> PAGE_SHIFT; | 831 | unsigned long start = kaddr >> PAGE_SHIFT; |
831 | const int nr_pages = end - start; | 832 | const int nr_pages = end - start; |
832 | int offset, i; | 833 | int offset, i; |
833 | struct bio *bio; | 834 | struct bio *bio; |
834 | 835 | ||
835 | bio = bio_alloc(gfp_mask, nr_pages); | 836 | bio = bio_alloc(gfp_mask, nr_pages); |
836 | if (!bio) | 837 | if (!bio) |
837 | return ERR_PTR(-ENOMEM); | 838 | return ERR_PTR(-ENOMEM); |
838 | 839 | ||
839 | offset = offset_in_page(kaddr); | 840 | offset = offset_in_page(kaddr); |
840 | for (i = 0; i < nr_pages; i++) { | 841 | for (i = 0; i < nr_pages; i++) { |
841 | unsigned int bytes = PAGE_SIZE - offset; | 842 | unsigned int bytes = PAGE_SIZE - offset; |
842 | 843 | ||
843 | if (len <= 0) | 844 | if (len <= 0) |
844 | break; | 845 | break; |
845 | 846 | ||
846 | if (bytes > len) | 847 | if (bytes > len) |
847 | bytes = len; | 848 | bytes = len; |
848 | 849 | ||
849 | if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, | 850 | if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, |
850 | offset) < bytes) | 851 | offset) < bytes) |
851 | break; | 852 | break; |
852 | 853 | ||
853 | data += bytes; | 854 | data += bytes; |
854 | len -= bytes; | 855 | len -= bytes; |
855 | offset = 0; | 856 | offset = 0; |
856 | } | 857 | } |
857 | 858 | ||
858 | bio->bi_end_io = bio_map_kern_endio; | 859 | bio->bi_end_io = bio_map_kern_endio; |
859 | return bio; | 860 | return bio; |
860 | } | 861 | } |
861 | 862 | ||
862 | /** | 863 | /** |
863 | * bio_map_kern - map kernel address into bio | 864 | * bio_map_kern - map kernel address into bio |
864 | * @q: the request_queue_t for the bio | 865 | * @q: the request_queue_t for the bio |
865 | * @data: pointer to buffer to map | 866 | * @data: pointer to buffer to map |
866 | * @len: length in bytes | 867 | * @len: length in bytes |
867 | * @gfp_mask: allocation flags for bio allocation | 868 | * @gfp_mask: allocation flags for bio allocation |
868 | * | 869 | * |
869 | * Map the kernel address into a bio suitable for io to a block | 870 | * Map the kernel address into a bio suitable for io to a block |
870 | * device. Returns an error pointer in case of error. | 871 | * device. Returns an error pointer in case of error. |
871 | */ | 872 | */ |
872 | struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, | 873 | struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, |
873 | gfp_t gfp_mask) | 874 | gfp_t gfp_mask) |
874 | { | 875 | { |
875 | struct bio *bio; | 876 | struct bio *bio; |
876 | 877 | ||
877 | bio = __bio_map_kern(q, data, len, gfp_mask); | 878 | bio = __bio_map_kern(q, data, len, gfp_mask); |
878 | if (IS_ERR(bio)) | 879 | if (IS_ERR(bio)) |
879 | return bio; | 880 | return bio; |
880 | 881 | ||
881 | if (bio->bi_size == len) | 882 | if (bio->bi_size == len) |
882 | return bio; | 883 | return bio; |
883 | 884 | ||
884 | /* | 885 | /* |
885 | * Don't support partial mappings. | 886 | * Don't support partial mappings. |
886 | */ | 887 | */ |
887 | bio_put(bio); | 888 | bio_put(bio); |
888 | return ERR_PTR(-EINVAL); | 889 | return ERR_PTR(-EINVAL); |
889 | } | 890 | } |
890 | 891 | ||
891 | /* | 892 | /* |
892 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | 893 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions |
893 | * for performing direct-IO in BIOs. | 894 | * for performing direct-IO in BIOs. |
894 | * | 895 | * |
895 | * The problem is that we cannot run set_page_dirty() from interrupt context | 896 | * The problem is that we cannot run set_page_dirty() from interrupt context |
896 | * because the required locks are not interrupt-safe. So what we can do is to | 897 | * because the required locks are not interrupt-safe. So what we can do is to |
897 | * mark the pages dirty _before_ performing IO. And in interrupt context, | 898 | * mark the pages dirty _before_ performing IO. And in interrupt context, |
898 | * check that the pages are still dirty. If so, fine. If not, redirty them | 899 | * check that the pages are still dirty. If so, fine. If not, redirty them |
899 | * in process context. | 900 | * in process context. |
900 | * | 901 | * |
901 | * We special-case compound pages here: normally this means reads into hugetlb | 902 | * We special-case compound pages here: normally this means reads into hugetlb |
902 | * pages. The logic in here doesn't really work right for compound pages | 903 | * pages. The logic in here doesn't really work right for compound pages |
903 | * because the VM does not uniformly chase down the head page in all cases. | 904 | * because the VM does not uniformly chase down the head page in all cases. |
904 | * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't | 905 | * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't |
905 | * handle them at all. So we skip compound pages here at an early stage. | 906 | * handle them at all. So we skip compound pages here at an early stage. |
906 | * | 907 | * |
907 | * Note that this code is very hard to test under normal circumstances because | 908 | * Note that this code is very hard to test under normal circumstances because |
908 | * direct-io pins the pages with get_user_pages(). This makes | 909 | * direct-io pins the pages with get_user_pages(). This makes |
909 | * is_page_cache_freeable return false, and the VM will not clean the pages. | 910 | * is_page_cache_freeable return false, and the VM will not clean the pages. |
910 | * But other code (eg, pdflush) could clean the pages if they are mapped | 911 | * But other code (eg, pdflush) could clean the pages if they are mapped |
911 | * pagecache. | 912 | * pagecache. |
912 | * | 913 | * |
913 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the | 914 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the |
914 | * deferred bio dirtying paths. | 915 | * deferred bio dirtying paths. |
915 | */ | 916 | */ |
916 | 917 | ||
917 | /* | 918 | /* |
918 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. | 919 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. |
919 | */ | 920 | */ |
920 | void bio_set_pages_dirty(struct bio *bio) | 921 | void bio_set_pages_dirty(struct bio *bio) |
921 | { | 922 | { |
922 | struct bio_vec *bvec = bio->bi_io_vec; | 923 | struct bio_vec *bvec = bio->bi_io_vec; |
923 | int i; | 924 | int i; |
924 | 925 | ||
925 | for (i = 0; i < bio->bi_vcnt; i++) { | 926 | for (i = 0; i < bio->bi_vcnt; i++) { |
926 | struct page *page = bvec[i].bv_page; | 927 | struct page *page = bvec[i].bv_page; |
927 | 928 | ||
928 | if (page && !PageCompound(page)) | 929 | if (page && !PageCompound(page)) |
929 | set_page_dirty_lock(page); | 930 | set_page_dirty_lock(page); |
930 | } | 931 | } |
931 | } | 932 | } |
932 | 933 | ||
933 | static void bio_release_pages(struct bio *bio) | 934 | static void bio_release_pages(struct bio *bio) |
934 | { | 935 | { |
935 | struct bio_vec *bvec = bio->bi_io_vec; | 936 | struct bio_vec *bvec = bio->bi_io_vec; |
936 | int i; | 937 | int i; |
937 | 938 | ||
938 | for (i = 0; i < bio->bi_vcnt; i++) { | 939 | for (i = 0; i < bio->bi_vcnt; i++) { |
939 | struct page *page = bvec[i].bv_page; | 940 | struct page *page = bvec[i].bv_page; |
940 | 941 | ||
941 | if (page) | 942 | if (page) |
942 | put_page(page); | 943 | put_page(page); |
943 | } | 944 | } |
944 | } | 945 | } |
945 | 946 | ||
946 | /* | 947 | /* |
947 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. | 948 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. |
948 | * If they are, then fine. If, however, some pages are clean then they must | 949 | * If they are, then fine. If, however, some pages are clean then they must |
949 | * have been written out during the direct-IO read. So we take another ref on | 950 | * have been written out during the direct-IO read. So we take another ref on |
950 | * the BIO and the offending pages and re-dirty the pages in process context. | 951 | * the BIO and the offending pages and re-dirty the pages in process context. |
951 | * | 952 | * |
952 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from | 953 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from |
953 | * here on. It will run one page_cache_release() against each page and will | 954 | * here on. It will run one page_cache_release() against each page and will |
954 | * run one bio_put() against the BIO. | 955 | * run one bio_put() against the BIO. |
955 | */ | 956 | */ |
956 | 957 | ||
957 | static void bio_dirty_fn(void *data); | 958 | static void bio_dirty_fn(void *data); |
958 | 959 | ||
959 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); | 960 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); |
960 | static DEFINE_SPINLOCK(bio_dirty_lock); | 961 | static DEFINE_SPINLOCK(bio_dirty_lock); |
961 | static struct bio *bio_dirty_list; | 962 | static struct bio *bio_dirty_list; |
962 | 963 | ||
963 | /* | 964 | /* |
964 | * This runs in process context | 965 | * This runs in process context |
965 | */ | 966 | */ |
966 | static void bio_dirty_fn(void *data) | 967 | static void bio_dirty_fn(void *data) |
967 | { | 968 | { |
968 | unsigned long flags; | 969 | unsigned long flags; |
969 | struct bio *bio; | 970 | struct bio *bio; |
970 | 971 | ||
971 | spin_lock_irqsave(&bio_dirty_lock, flags); | 972 | spin_lock_irqsave(&bio_dirty_lock, flags); |
972 | bio = bio_dirty_list; | 973 | bio = bio_dirty_list; |
973 | bio_dirty_list = NULL; | 974 | bio_dirty_list = NULL; |
974 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | 975 | spin_unlock_irqrestore(&bio_dirty_lock, flags); |
975 | 976 | ||
976 | while (bio) { | 977 | while (bio) { |
977 | struct bio *next = bio->bi_private; | 978 | struct bio *next = bio->bi_private; |
978 | 979 | ||
979 | bio_set_pages_dirty(bio); | 980 | bio_set_pages_dirty(bio); |
980 | bio_release_pages(bio); | 981 | bio_release_pages(bio); |
981 | bio_put(bio); | 982 | bio_put(bio); |
982 | bio = next; | 983 | bio = next; |
983 | } | 984 | } |
984 | } | 985 | } |
985 | 986 | ||
986 | void bio_check_pages_dirty(struct bio *bio) | 987 | void bio_check_pages_dirty(struct bio *bio) |
987 | { | 988 | { |
988 | struct bio_vec *bvec = bio->bi_io_vec; | 989 | struct bio_vec *bvec = bio->bi_io_vec; |
989 | int nr_clean_pages = 0; | 990 | int nr_clean_pages = 0; |
990 | int i; | 991 | int i; |
991 | 992 | ||
992 | for (i = 0; i < bio->bi_vcnt; i++) { | 993 | for (i = 0; i < bio->bi_vcnt; i++) { |
993 | struct page *page = bvec[i].bv_page; | 994 | struct page *page = bvec[i].bv_page; |
994 | 995 | ||
995 | if (PageDirty(page) || PageCompound(page)) { | 996 | if (PageDirty(page) || PageCompound(page)) { |
996 | page_cache_release(page); | 997 | page_cache_release(page); |
997 | bvec[i].bv_page = NULL; | 998 | bvec[i].bv_page = NULL; |
998 | } else { | 999 | } else { |
999 | nr_clean_pages++; | 1000 | nr_clean_pages++; |
1000 | } | 1001 | } |
1001 | } | 1002 | } |
1002 | 1003 | ||
1003 | if (nr_clean_pages) { | 1004 | if (nr_clean_pages) { |
1004 | unsigned long flags; | 1005 | unsigned long flags; |
1005 | 1006 | ||
1006 | spin_lock_irqsave(&bio_dirty_lock, flags); | 1007 | spin_lock_irqsave(&bio_dirty_lock, flags); |
1007 | bio->bi_private = bio_dirty_list; | 1008 | bio->bi_private = bio_dirty_list; |
1008 | bio_dirty_list = bio; | 1009 | bio_dirty_list = bio; |
1009 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | 1010 | spin_unlock_irqrestore(&bio_dirty_lock, flags); |
1010 | schedule_work(&bio_dirty_work); | 1011 | schedule_work(&bio_dirty_work); |
1011 | } else { | 1012 | } else { |
1012 | bio_put(bio); | 1013 | bio_put(bio); |
1013 | } | 1014 | } |
1014 | } | 1015 | } |
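/*
 * A minimal sketch of the pattern described in the comment block above,
 * assuming a hypothetical direct-IO read path whose pages were pinned with
 * get_user_pages(): the pages are dirtied in process context before
 * submission, and the completion handler hands the bio to
 * bio_check_pages_dirty(), which re-dirties anything the VM cleaned in the
 * meantime and then drops the page and bio references itself.
 */
static int example_dio_read_end_io(struct bio *bio, unsigned int bytes_done,
				   int error)
{
	if (bio->bi_size)
		return 1;		/* partial completion, more to come */

	bio_check_pages_dirty(bio);	/* owns the bio and its pages from here */
	return 0;
}

static void example_submit_dio_read(struct bio *bio)
{
	bio->bi_end_io = example_dio_read_end_io;
	bio_set_pages_dirty(bio);	/* mark dirty _before_ the I/O */
	submit_bio(READ, bio);
}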
1015 | 1016 | ||
1016 | /** | 1017 | /** |
1017 | * bio_endio - end I/O on a bio | 1018 | * bio_endio - end I/O on a bio |
1018 | * @bio: bio | 1019 | * @bio: bio |
1019 | * @bytes_done: number of bytes completed | 1020 | * @bytes_done: number of bytes completed |
1020 | * @error: error, if any | 1021 | * @error: error, if any |
1021 | * | 1022 | * |
1022 | * Description: | 1023 | * Description: |
1023 | * bio_endio() will end I/O on @bytes_done number of bytes. This may be | 1024 | * bio_endio() will end I/O on @bytes_done number of bytes. This may be |
1024 | * just a part of the bio, or it may be the whole bio. bio_endio() | 1025 | * just a part of the bio, or it may be the whole bio. bio_endio() |
1025 | * is the preferred way to end I/O on a bio, it takes care of decrementing | 1026 | * is the preferred way to end I/O on a bio, it takes care of decrementing |
1026 | * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and | 1027 | * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and |
1027 | * one of the established -Exxxx (-EIO, for instance) error values in | 1028 | * one of the established -Exxxx (-EIO, for instance) error values in |
1028 | * case something went wrong. No one should call bi_end_io() directly on | 1029 | * case something went wrong. No one should call bi_end_io() directly on |
1029 | * a bio unless they own it and thus know that it has an end_io function. | 1030 | * a bio unless they own it and thus know that it has an end_io function. |
1030 | **/ | 1031 | **/ |
1031 | void bio_endio(struct bio *bio, unsigned int bytes_done, int error) | 1032 | void bio_endio(struct bio *bio, unsigned int bytes_done, int error) |
1032 | { | 1033 | { |
1033 | if (error) | 1034 | if (error) |
1034 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | 1035 | clear_bit(BIO_UPTODATE, &bio->bi_flags); |
1035 | 1036 | ||
1036 | if (unlikely(bytes_done > bio->bi_size)) { | 1037 | if (unlikely(bytes_done > bio->bi_size)) { |
1037 | printk("%s: want %u bytes done, only %u left\n", __FUNCTION__, | 1038 | printk("%s: want %u bytes done, only %u left\n", __FUNCTION__, |
1038 | bytes_done, bio->bi_size); | 1039 | bytes_done, bio->bi_size); |
1039 | bytes_done = bio->bi_size; | 1040 | bytes_done = bio->bi_size; |
1040 | } | 1041 | } |
1041 | 1042 | ||
1042 | bio->bi_size -= bytes_done; | 1043 | bio->bi_size -= bytes_done; |
1043 | bio->bi_sector += (bytes_done >> 9); | 1044 | bio->bi_sector += (bytes_done >> 9); |
1044 | 1045 | ||
1045 | if (bio->bi_end_io) | 1046 | if (bio->bi_end_io) |
1046 | bio->bi_end_io(bio, bytes_done, error); | 1047 | bio->bi_end_io(bio, bytes_done, error); |
1047 | } | 1048 | } |
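/*
 * A minimal sketch, assuming a hypothetical stacking driver that cloned a
 * bio and stored the original in the clone's bi_private: its completion
 * handler forwards the completed byte count and error to the original via
 * bio_endio() rather than calling bi_end_io directly, so the bi_size and
 * bi_sector bookkeeping stays correct for partial completions as well.
 */
static int example_clone_end_io(struct bio *clone, unsigned int bytes_done,
				int error)
{
	struct bio *orig = clone->bi_private;	/* set when the clone was made */

	bio_endio(orig, bytes_done, error);
	if (clone->bi_size)
		return 1;		/* the clone is not finished yet */

	bio_put(clone);			/* drop the reference taken at clone time */
	return 0;
}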
1048 | 1049 | ||
1049 | void bio_pair_release(struct bio_pair *bp) | 1050 | void bio_pair_release(struct bio_pair *bp) |
1050 | { | 1051 | { |
1051 | if (atomic_dec_and_test(&bp->cnt)) { | 1052 | if (atomic_dec_and_test(&bp->cnt)) { |
1052 | struct bio *master = bp->bio1.bi_private; | 1053 | struct bio *master = bp->bio1.bi_private; |
1053 | 1054 | ||
1054 | bio_endio(master, master->bi_size, bp->error); | 1055 | bio_endio(master, master->bi_size, bp->error); |
1055 | mempool_free(bp, bp->bio2.bi_private); | 1056 | mempool_free(bp, bp->bio2.bi_private); |
1056 | } | 1057 | } |
1057 | } | 1058 | } |
1058 | 1059 | ||
1059 | static int bio_pair_end_1(struct bio * bi, unsigned int done, int err) | 1060 | static int bio_pair_end_1(struct bio * bi, unsigned int done, int err) |
1060 | { | 1061 | { |
1061 | struct bio_pair *bp = container_of(bi, struct bio_pair, bio1); | 1062 | struct bio_pair *bp = container_of(bi, struct bio_pair, bio1); |
1062 | 1063 | ||
1063 | if (err) | 1064 | if (err) |
1064 | bp->error = err; | 1065 | bp->error = err; |
1065 | 1066 | ||
1066 | if (bi->bi_size) | 1067 | if (bi->bi_size) |
1067 | return 1; | 1068 | return 1; |
1068 | 1069 | ||
1069 | bio_pair_release(bp); | 1070 | bio_pair_release(bp); |
1070 | return 0; | 1071 | return 0; |
1071 | } | 1072 | } |
1072 | 1073 | ||
1073 | static int bio_pair_end_2(struct bio * bi, unsigned int done, int err) | 1074 | static int bio_pair_end_2(struct bio * bi, unsigned int done, int err) |
1074 | { | 1075 | { |
1075 | struct bio_pair *bp = container_of(bi, struct bio_pair, bio2); | 1076 | struct bio_pair *bp = container_of(bi, struct bio_pair, bio2); |
1076 | 1077 | ||
1077 | if (err) | 1078 | if (err) |
1078 | bp->error = err; | 1079 | bp->error = err; |
1079 | 1080 | ||
1080 | if (bi->bi_size) | 1081 | if (bi->bi_size) |
1081 | return 1; | 1082 | return 1; |
1082 | 1083 | ||
1083 | bio_pair_release(bp); | 1084 | bio_pair_release(bp); |
1084 | return 0; | 1085 | return 0; |
1085 | } | 1086 | } |
1086 | 1087 | ||
1087 | /* | 1088 | /* |
1088 | * split a bio - only worry about a bio with a single page | 1089 | * split a bio - only worry about a bio with a single page |
1089 | * in its iovec | 1090 | * in its iovec |
1090 | */ | 1091 | */ |
1091 | struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) | 1092 | struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) |
1092 | { | 1093 | { |
1093 | struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO); | 1094 | struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO); |
1094 | 1095 | ||
1095 | if (!bp) | 1096 | if (!bp) |
1096 | return bp; | 1097 | return bp; |
1097 | 1098 | ||
1098 | blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi, | 1099 | blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi, |
1099 | bi->bi_sector + first_sectors); | 1100 | bi->bi_sector + first_sectors); |
1100 | 1101 | ||
1101 | BUG_ON(bi->bi_vcnt != 1); | 1102 | BUG_ON(bi->bi_vcnt != 1); |
1102 | BUG_ON(bi->bi_idx != 0); | 1103 | BUG_ON(bi->bi_idx != 0); |
1103 | atomic_set(&bp->cnt, 3); | 1104 | atomic_set(&bp->cnt, 3); |
1104 | bp->error = 0; | 1105 | bp->error = 0; |
1105 | bp->bio1 = *bi; | 1106 | bp->bio1 = *bi; |
1106 | bp->bio2 = *bi; | 1107 | bp->bio2 = *bi; |
1107 | bp->bio2.bi_sector += first_sectors; | 1108 | bp->bio2.bi_sector += first_sectors; |
1108 | bp->bio2.bi_size -= first_sectors << 9; | 1109 | bp->bio2.bi_size -= first_sectors << 9; |
1109 | bp->bio1.bi_size = first_sectors << 9; | 1110 | bp->bio1.bi_size = first_sectors << 9; |
1110 | 1111 | ||
1111 | bp->bv1 = bi->bi_io_vec[0]; | 1112 | bp->bv1 = bi->bi_io_vec[0]; |
1112 | bp->bv2 = bi->bi_io_vec[0]; | 1113 | bp->bv2 = bi->bi_io_vec[0]; |
1113 | bp->bv2.bv_offset += first_sectors << 9; | 1114 | bp->bv2.bv_offset += first_sectors << 9; |
1114 | bp->bv2.bv_len -= first_sectors << 9; | 1115 | bp->bv2.bv_len -= first_sectors << 9; |
1115 | bp->bv1.bv_len = first_sectors << 9; | 1116 | bp->bv1.bv_len = first_sectors << 9; |
1116 | 1117 | ||
1117 | bp->bio1.bi_io_vec = &bp->bv1; | 1118 | bp->bio1.bi_io_vec = &bp->bv1; |
1118 | bp->bio2.bi_io_vec = &bp->bv2; | 1119 | bp->bio2.bi_io_vec = &bp->bv2; |
1119 | 1120 | ||
1120 | bp->bio1.bi_max_vecs = 1; | 1121 | bp->bio1.bi_max_vecs = 1; |
1121 | bp->bio2.bi_max_vecs = 1; | 1122 | bp->bio2.bi_max_vecs = 1; |
1122 | 1123 | ||
1123 | bp->bio1.bi_end_io = bio_pair_end_1; | 1124 | bp->bio1.bi_end_io = bio_pair_end_1; |
1124 | bp->bio2.bi_end_io = bio_pair_end_2; | 1125 | bp->bio2.bi_end_io = bio_pair_end_2; |
1125 | 1126 | ||
1126 | bp->bio1.bi_private = bi; | 1127 | bp->bio1.bi_private = bi; |
1127 | bp->bio2.bi_private = pool; | 1128 | bp->bio2.bi_private = pool; |
1128 | 1129 | ||
1129 | return bp; | 1130 | return bp; |
1130 | } | 1131 | } |
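/*
 * A minimal sketch, assuming a hypothetical remapping driver whose bios must
 * not cross "chunk_end" (an example boundary sector): split the single-page
 * bio with bio_split() and submit both halves.  The master bio is completed
 * by bio_pair_release() once both halves and this caller have dropped their
 * references (bp->cnt starts at 3 above), so the caller must not complete
 * the original bio itself.
 */
static void example_split_across_boundary(struct bio *bio, sector_t chunk_end)
{
	struct bio_pair *bp;

	bp = bio_split(bio, bio_split_pool, (int)(chunk_end - bio->bi_sector));
	if (!bp)
		return;		/* GFP_NOIO mempool_alloc should not fail, but be safe */

	generic_make_request(&bp->bio1);
	generic_make_request(&bp->bio2);
	bio_pair_release(bp);	/* drop the submitter's reference */
}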
1131 | 1132 | ||
1132 | 1133 | ||
1133 | /* | 1134 | /* |
1134 | * create memory pools for biovec's in a bio_set. | 1135 | * create memory pools for biovec's in a bio_set. |
1135 | * use the global biovec slabs created for general use. | 1136 | * use the global biovec slabs created for general use. |
1136 | */ | 1137 | */ |
1137 | static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) | 1138 | static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) |
1138 | { | 1139 | { |
1139 | int i; | 1140 | int i; |
1140 | 1141 | ||
1141 | for (i = 0; i < BIOVEC_NR_POOLS; i++) { | 1142 | for (i = 0; i < BIOVEC_NR_POOLS; i++) { |
1142 | struct biovec_slab *bp = bvec_slabs + i; | 1143 | struct biovec_slab *bp = bvec_slabs + i; |
1143 | mempool_t **bvp = bs->bvec_pools + i; | 1144 | mempool_t **bvp = bs->bvec_pools + i; |
1144 | 1145 | ||
1145 | if (pool_entries > 1 && i >= scale) | 1146 | if (pool_entries > 1 && i >= scale) |
1146 | pool_entries >>= 1; | 1147 | pool_entries >>= 1; |
1147 | 1148 | ||
1148 | *bvp = mempool_create_slab_pool(pool_entries, bp->slab); | 1149 | *bvp = mempool_create_slab_pool(pool_entries, bp->slab); |
1149 | if (!*bvp) | 1150 | if (!*bvp) |
1150 | return -ENOMEM; | 1151 | return -ENOMEM; |
1151 | } | 1152 | } |
1152 | return 0; | 1153 | return 0; |
1153 | } | 1154 | } |
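/*
 * A worked example of the sizing above (values assumed for illustration):
 * with pool_entries = 5 and scale = 4, as init_bio() below picks on a box
 * with roughly 128MB free, the biovec pools get 5, 5, 5, 5, 2 and 1
 * reserved entries respectively -- pool_entries is halved once per pool
 * from index 'scale' onwards.  The helper below simply replays that loop.
 */
static void example_pool_sizes(int pool_entries, int scale,
			       int sizes[BIOVEC_NR_POOLS])
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		if (pool_entries > 1 && i >= scale)
			pool_entries >>= 1;
		sizes[i] = pool_entries;
	}
}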
1154 | 1155 | ||
1155 | static void biovec_free_pools(struct bio_set *bs) | 1156 | static void biovec_free_pools(struct bio_set *bs) |
1156 | { | 1157 | { |
1157 | int i; | 1158 | int i; |
1158 | 1159 | ||
1159 | for (i = 0; i < BIOVEC_NR_POOLS; i++) { | 1160 | for (i = 0; i < BIOVEC_NR_POOLS; i++) { |
1160 | mempool_t *bvp = bs->bvec_pools[i]; | 1161 | mempool_t *bvp = bs->bvec_pools[i]; |
1161 | 1162 | ||
1162 | if (bvp) | 1163 | if (bvp) |
1163 | mempool_destroy(bvp); | 1164 | mempool_destroy(bvp); |
1164 | } | 1165 | } |
1165 | 1166 | ||
1166 | } | 1167 | } |
1167 | 1168 | ||
1168 | void bioset_free(struct bio_set *bs) | 1169 | void bioset_free(struct bio_set *bs) |
1169 | { | 1170 | { |
1170 | if (bs->bio_pool) | 1171 | if (bs->bio_pool) |
1171 | mempool_destroy(bs->bio_pool); | 1172 | mempool_destroy(bs->bio_pool); |
1172 | 1173 | ||
1173 | biovec_free_pools(bs); | 1174 | biovec_free_pools(bs); |
1174 | 1175 | ||
1175 | kfree(bs); | 1176 | kfree(bs); |
1176 | } | 1177 | } |
1177 | 1178 | ||
1178 | struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) | 1179 | struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) |
1179 | { | 1180 | { |
1180 | struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL); | 1181 | struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL); |
1181 | 1182 | ||
1182 | if (!bs) | 1183 | if (!bs) |
1183 | return NULL; | 1184 | return NULL; |
1184 | 1185 | ||
1185 | bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab); | 1186 | bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab); |
1186 | if (!bs->bio_pool) | 1187 | if (!bs->bio_pool) |
1187 | goto bad; | 1188 | goto bad; |
1188 | 1189 | ||
1189 | if (!biovec_create_pools(bs, bvec_pool_size, scale)) | 1190 | if (!biovec_create_pools(bs, bvec_pool_size, scale)) |
1190 | return bs; | 1191 | return bs; |
1191 | 1192 | ||
1192 | bad: | 1193 | bad: |
1193 | bioset_free(bs); | 1194 | bioset_free(bs); |
1194 | return NULL; | 1195 | return NULL; |
1195 | } | 1196 | } |
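/*
 * A minimal sketch, assuming a hypothetical subsystem that wants its own
 * reserves instead of sharing fs_bio_set: create a private bio_set, allocate
 * bios from it with bio_alloc_bioset(), and tear it down on exit.  The pool
 * sizes (32 bios, 4 biovecs, scale 4) are example values only.
 */
static struct bio_set *example_bs;

static int example_bioset_init(void)
{
	example_bs = bioset_create(32, 4, 4);
	if (!example_bs)
		return -ENOMEM;
	return 0;
}

static struct bio *example_bioset_alloc(gfp_t gfp_mask, int nr_vecs)
{
	/* both the bio and its biovec come out of example_bs */
	return bio_alloc_bioset(gfp_mask, nr_vecs, example_bs);
}

static void example_bioset_exit(void)
{
	bioset_free(example_bs);
}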
1196 | 1197 | ||
1197 | static void __init biovec_init_slabs(void) | 1198 | static void __init biovec_init_slabs(void) |
1198 | { | 1199 | { |
1199 | int i; | 1200 | int i; |
1200 | 1201 | ||
1201 | for (i = 0; i < BIOVEC_NR_POOLS; i++) { | 1202 | for (i = 0; i < BIOVEC_NR_POOLS; i++) { |
1202 | int size; | 1203 | int size; |
1203 | struct biovec_slab *bvs = bvec_slabs + i; | 1204 | struct biovec_slab *bvs = bvec_slabs + i; |
1204 | 1205 | ||
1205 | size = bvs->nr_vecs * sizeof(struct bio_vec); | 1206 | size = bvs->nr_vecs * sizeof(struct bio_vec); |
1206 | bvs->slab = kmem_cache_create(bvs->name, size, 0, | 1207 | bvs->slab = kmem_cache_create(bvs->name, size, 0, |
1207 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); | 1208 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); |
1208 | } | 1209 | } |
1209 | } | 1210 | } |
1210 | 1211 | ||
1211 | static int __init init_bio(void) | 1212 | static int __init init_bio(void) |
1212 | { | 1213 | { |
1213 | int megabytes, bvec_pool_entries; | 1214 | int megabytes, bvec_pool_entries; |
1214 | int scale = BIOVEC_NR_POOLS; | 1215 | int scale = BIOVEC_NR_POOLS; |
1215 | 1216 | ||
1216 | bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, | 1217 | bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, |
1217 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); | 1218 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); |
1218 | 1219 | ||
1219 | biovec_init_slabs(); | 1220 | biovec_init_slabs(); |
1220 | 1221 | ||
1221 | megabytes = nr_free_pages() >> (20 - PAGE_SHIFT); | 1222 | megabytes = nr_free_pages() >> (20 - PAGE_SHIFT); |
1222 | 1223 | ||
1223 | /* | 1224 | /* |
1224 | * find out where to start scaling | 1225 | * find out where to start scaling |
1225 | */ | 1226 | */ |
1226 | if (megabytes <= 16) | 1227 | if (megabytes <= 16) |
1227 | scale = 0; | 1228 | scale = 0; |
1228 | else if (megabytes <= 32) | 1229 | else if (megabytes <= 32) |
1229 | scale = 1; | 1230 | scale = 1; |
1230 | else if (megabytes <= 64) | 1231 | else if (megabytes <= 64) |
1231 | scale = 2; | 1232 | scale = 2; |
1232 | else if (megabytes <= 96) | 1233 | else if (megabytes <= 96) |
1233 | scale = 3; | 1234 | scale = 3; |
1234 | else if (megabytes <= 128) | 1235 | else if (megabytes <= 128) |
1235 | scale = 4; | 1236 | scale = 4; |
1236 | 1237 | ||
1237 | /* | 1238 | /* |
1238 | * Limit number of entries reserved -- mempools are only used when | 1239 | * Limit number of entries reserved -- mempools are only used when |
1239 | * the system is completely unable to allocate memory, so we only | 1240 | * the system is completely unable to allocate memory, so we only |
1240 | * need enough to make progress. | 1241 | * need enough to make progress. |
1241 | */ | 1242 | */ |
1242 | bvec_pool_entries = 1 + scale; | 1243 | bvec_pool_entries = 1 + scale; |
1243 | 1244 | ||
1244 | fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); | 1245 | fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); |
1245 | if (!fs_bio_set) | 1246 | if (!fs_bio_set) |
1246 | panic("bio: can't allocate bios\n"); | 1247 | panic("bio: can't allocate bios\n"); |
1247 | 1248 | ||
1248 | bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES, | 1249 | bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES, |
1249 | sizeof(struct bio_pair)); | 1250 | sizeof(struct bio_pair)); |
1250 | if (!bio_split_pool) | 1251 | if (!bio_split_pool) |
1251 | panic("bio: can't create split pool\n"); | 1252 | panic("bio: can't create split pool\n"); |
1252 | 1253 | ||
1253 | return 0; | 1254 | return 0; |
1254 | } | 1255 | } |
1255 | 1256 | ||
1256 | subsys_initcall(init_bio); | 1257 | subsys_initcall(init_bio); |
1257 | 1258 | ||
1258 | EXPORT_SYMBOL(bio_alloc); | 1259 | EXPORT_SYMBOL(bio_alloc); |
1259 | EXPORT_SYMBOL(bio_put); | 1260 | EXPORT_SYMBOL(bio_put); |
1260 | EXPORT_SYMBOL(bio_free); | 1261 | EXPORT_SYMBOL(bio_free); |
1261 | EXPORT_SYMBOL(bio_endio); | 1262 | EXPORT_SYMBOL(bio_endio); |
1262 | EXPORT_SYMBOL(bio_init); | 1263 | EXPORT_SYMBOL(bio_init); |
1263 | EXPORT_SYMBOL(__bio_clone); | 1264 | EXPORT_SYMBOL(__bio_clone); |
1264 | EXPORT_SYMBOL(bio_clone); | 1265 | EXPORT_SYMBOL(bio_clone); |
1265 | EXPORT_SYMBOL(bio_phys_segments); | 1266 | EXPORT_SYMBOL(bio_phys_segments); |
1266 | EXPORT_SYMBOL(bio_hw_segments); | 1267 | EXPORT_SYMBOL(bio_hw_segments); |
1267 | EXPORT_SYMBOL(bio_add_page); | 1268 | EXPORT_SYMBOL(bio_add_page); |
1268 | EXPORT_SYMBOL(bio_add_pc_page); | 1269 | EXPORT_SYMBOL(bio_add_pc_page); |
1269 | EXPORT_SYMBOL(bio_get_nr_vecs); | 1270 | EXPORT_SYMBOL(bio_get_nr_vecs); |
1270 | EXPORT_SYMBOL(bio_map_user); | 1271 | EXPORT_SYMBOL(bio_map_user); |
1271 | EXPORT_SYMBOL(bio_unmap_user); | 1272 | EXPORT_SYMBOL(bio_unmap_user); |
1272 | EXPORT_SYMBOL(bio_map_kern); | 1273 | EXPORT_SYMBOL(bio_map_kern); |
1273 | EXPORT_SYMBOL(bio_pair_release); | 1274 | EXPORT_SYMBOL(bio_pair_release); |
1274 | EXPORT_SYMBOL(bio_split); | 1275 | EXPORT_SYMBOL(bio_split); |
1275 | EXPORT_SYMBOL(bio_split_pool); | 1276 | EXPORT_SYMBOL(bio_split_pool); |
1276 | EXPORT_SYMBOL(bio_copy_user); | 1277 | EXPORT_SYMBOL(bio_copy_user); |
1277 | EXPORT_SYMBOL(bio_uncopy_user); | 1278 | EXPORT_SYMBOL(bio_uncopy_user); |
1278 | EXPORT_SYMBOL(bioset_create); | 1279 | EXPORT_SYMBOL(bioset_create); |
1279 | EXPORT_SYMBOL(bioset_free); | 1280 | EXPORT_SYMBOL(bioset_free); |