Blame view
lib/iov_iter.c
42 KB
457c89965
|
1 |
// SPDX-License-Identifier: GPL-2.0-only |
4f18cd317
|
2 |
#include <linux/export.h> |
2f8b54447
|
3 |
#include <linux/bvec.h> |
4f18cd317
|
4 5 |
#include <linux/uio.h> #include <linux/pagemap.h> |
91f79c43d
|
6 7 |
#include <linux/slab.h> #include <linux/vmalloc.h> |
241699cd7
|
8 |
#include <linux/splice.h> |
a604ec7e9
|
9 |
#include <net/checksum.h> |
d05f44355
|
10 |
#include <linux/scatterlist.h> |
4f18cd317
|
11 |
|
241699cd7
|
12 |
#define PIPE_PARANOIA /* for now */ |
04a311655
|
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \ size_t left; \ size_t wanted = n; \ __p = i->iov; \ __v.iov_len = min(n, __p->iov_len - skip); \ if (likely(__v.iov_len)) { \ __v.iov_base = __p->iov_base + skip; \ left = (STEP); \ __v.iov_len -= left; \ skip += __v.iov_len; \ n -= __v.iov_len; \ } else { \ left = 0; \ } \ while (unlikely(!left && n)) { \ __p++; \ __v.iov_len = min(n, __p->iov_len); \ if (unlikely(!__v.iov_len)) \ continue; \ __v.iov_base = __p->iov_base; \ left = (STEP); \ __v.iov_len -= left; \ skip = __v.iov_len; \ n -= __v.iov_len; \ } \ n = wanted - n; \ } |
a280455fa
|
40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
#define iterate_kvec(i, n, __v, __p, skip, STEP) { \ size_t wanted = n; \ __p = i->kvec; \ __v.iov_len = min(n, __p->iov_len - skip); \ if (likely(__v.iov_len)) { \ __v.iov_base = __p->iov_base + skip; \ (void)(STEP); \ skip += __v.iov_len; \ n -= __v.iov_len; \ } \ while (unlikely(n)) { \ __p++; \ __v.iov_len = min(n, __p->iov_len); \ if (unlikely(!__v.iov_len)) \ continue; \ __v.iov_base = __p->iov_base; \ (void)(STEP); \ skip = __v.iov_len; \ n -= __v.iov_len; \ } \ n = wanted; \ } |
1bdc76aea
|
62 63 64 65 66 67 68 |
/*
 * Walk the bio_vec array of iterator i via a bvec_iter, applying STEP to
 * each non-empty (page, offset, len) chunk.  __bi is left positioned past
 * the consumed bytes so callers can advance the iterator from it.
 */
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}
a280455fa
|
73 |
/*
 * Dispatch on the iterator flavour WITHOUT advancing it: I runs per
 * user-space iovec chunk, B per bio_vec chunk, K per kvec chunk.
 * ITER_DISCARD iterators have no data, so nothing runs for them.
 */
#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}
a280455fa
|
92 |
/*
 * Like iterate_all_kinds, but also advances the iterator past the bytes
 * processed: updates iov/kvec/bvec cursor, nr_segs, iov_offset and count.
 * n is clamped to i->count first, so STEPs never run past the iterator.
 */
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
09fc68dc6
|
132 133 |
/*
 * Raw user-space copy helpers with an explicit access_ok() check and KASAN
 * annotation of the kernel-side buffer.  Both return the number of bytes
 * NOT copied (0 on complete success), matching raw_copy_*_user().
 */
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
62a8067a7
|
149 |
/*
 * Copy up to @bytes from @page (starting at @offset) into the user iovecs
 * of @i.  Tries the fast path first: kmap_atomic plus copyout, which is
 * only safe if the destination is already faulted in (copyout under a
 * kmap_atomic section must not sleep).  On a fault it falls back to a
 * sleeping kmap.  Advances the iterator and returns bytes copied.
 */
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	/* pre-fault so the atomic-kmap copy below is unlikely to fault */
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	/* step past a fully-consumed iovec before recording position */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
4f18cd317
|
230 |
|
62a8067a7
|
231 |
/*
 * Mirror of copy_page_to_iter_iovec(): copy up to @bytes from the user
 * iovecs of @i into @page at @offset.  Same fast-path/slow-path split:
 * atomic kmap + copyin if the source is faulted in, sleeping kmap
 * otherwise.  Advances the iterator and returns bytes copied.
 */
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	/* pre-fault so the atomic-kmap copy below is unlikely to fault */
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	/* step past a fully-consumed iovec before recording position */
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
f0d1bec9d
|
312 |
|
241699cd7
|
313 314 315 316 |
#ifdef PIPE_PARANOIA
/*
 * Debug check that a pipe-backed iterator is consistent with its pipe:
 * either positioned mid-way through the last occupied buffer (iov_offset
 * set) or exactly at the ring head (iov_offset == 0).  Dumps the ring and
 * WARNs on violation.  Compiled out unless PIPE_PARANOIA is defined.
 *
 * NOTE(review): the "\n" terminators in the printk format strings were
 * eaten by extraction and are restored here — confirm against upstream.
 */
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif
241699cd7
|
356 357 358 359 360 |
/*
 * "Copy" @bytes of @page into a pipe-backed iterator by reference: either
 * extend the last pipe buffer (when it already ends at this page/offset)
 * or append a new zero-copy buffer holding a reference to @page.  Returns
 * bytes accounted, or 0 if the pipe is full or the iterator is insane.
 */
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);	/* the pipe buffer now holds its own reference */
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}
4f18cd317
|
403 |
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	/* only user-space iovecs can fault; kvec/bvec need no pre-fault */
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
171a02032
|
427 |
|
aa563d7bc
|
428 |
/*
 * Initialize an iov_iter over a user iovec array.  @direction is READ or
 * WRITE.  Under set_fs(KERNEL_DS) (uaccess_kernel()) the "user" pointers
 * are really kernel addresses, so the iterator is set up as ITER_KVEC.
 */
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
7b2c99d15
|
447 |
|
62a8067a7
|
448 449 450 451 452 453 |
/* Copy @len bytes out of @page at @offset under a temporary atomic kmap. */
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

/* Copy @len bytes into @page at @offset under a temporary atomic kmap. */
static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

/* Zero @len bytes of @page at @offset under a temporary atomic kmap. */
static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
241699cd7
|
466 467 468 469 |
/* True if this pipe buffer was allocated by us (see push_pipe below). */
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

/*
 * Compute the (head, offset) position where new data should go: the
 * iterator's current position, bumped to the next slot if the current
 * buffer is full or not one of ours.
 */
static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

/*
 * Make room for @size bytes in the pipe: grow the current buffer and/or
 * append freshly allocated pages.  Reports the starting position through
 * *iter_headp/*offp and returns how many bytes of capacity were secured
 * (can be short if the pipe fills up or a page allocation fails).
 */
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		/* try to fit the tail of the request in the current page */
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

/*
 * Copy @bytes from kernel buffer @addr into a pipe-backed iterator,
 * filling page-sized chunks secured by push_pipe().  Returns bytes copied
 * (possibly short if the pipe could not absorb everything).
 */
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
f91528955
|
560 561 562 563 564 565 |
/*
 * memcpy @len bytes while folding their Internet checksum into @sum;
 * @off is the byte offset of this chunk within the overall stream (needed
 * by csum_block_add for correct byte-order folding).
 */
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
	return csum_block_add(sum, next, off);
}
78e1f3861
|
566 567 568 569 |
/*
 * Like copy_pipe_to_iter(), but also accumulates the Internet checksum of
 * the copied data into *csum.  Returns bytes copied (possibly short).
 */
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
				__wsum *csum, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, r;
	size_t off = 0;		/* running offset within the stream, for csum folding */
	__wsum sum = *csum;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	*csum = sum;
	return bytes;
}
aa28de275
|
598 |
/*
 * Copy @bytes from kernel buffer @addr into whatever the iterator points
 * at (user iovec, bvec pages, kvec, or pipe), advancing the iterator.
 * Returns the number of bytes accepted (ITER_IOVEC and pipe may be short).
 */
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
c35e02480
|
615 |
|
8780356ef
|
616 617 618 |
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
/* copyout() variant whose source reads survive machine-check exceptions. */
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

/* memcpy_to_page() variant using memcpy_mcsafe; returns bytes NOT copied. */
static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

/*
 * copy_pipe_to_iter() variant that stops at the first machine-check-style
 * read failure; xfer tracks how much actually made it into the pipe.
 */
static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
					    off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and typical _copy_to_iter().
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			/* short copy: report bytes that made it before the fault */
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			/* short copy: report bytes that made it before the fault */
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
aa28de275
|
729 |
/*
 * Copy @bytes from the iterator into kernel buffer @addr, advancing the
 * iterator.  Pipe iterators are read-only sources here and are rejected.
 * Returns bytes processed (ITER_IOVEC may be short on a fault).
 */
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
c35e02480
|
748 |
|
aa28de275
|
749 |
/*
 * All-or-nothing variant of _copy_from_iter(): returns true and advances
 * the iterator only if all @bytes could be copied; on any fault or if the
 * iterator holds fewer than @bytes, returns false with the iterator
 * unadvanced (iteration uses iterate_all_kinds, advance happens at the end).
 */
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);
cbbd26b8b
|
774 |
|
aa28de275
|
775 |
/*
 * Like _copy_from_iter(), but the user-space leg uses the non-temporal
 * (cache-bypassing) copy; kvec/bvec legs fall back to plain memcpy.
 */
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
aa583096d
|
793 |
|
0aed55af8
|
794 |
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
abd08d7d2
|
795 796 797 798 799 800 801 802 803 804 805 806 807 808 |
/** * _copy_from_iter_flushcache - write destination through cpu cache * @addr: destination kernel address * @bytes: total transfer length * @iter: source iterator * * The pmem driver arranges for filesystem-dax to use this facility via * dax_copy_from_iter() for ensuring that writes to persistent memory * are flushed through the CPU cache. It is differentiated from * _copy_from_iter_nocache() in that guarantees all data is flushed for * all iterator types. The _copy_from_iter_nocache() only attempts to * bypass the cache for the ITER_IOVEC case, and on some archs may use * instructions that strand dirty-data in the cache. */ |
6a37e9400
|
809 |
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) |
0aed55af8
|
810 811 |
{ char *to = addr; |
00e237074
|
812 |
if (unlikely(iov_iter_is_pipe(i))) { |
0aed55af8
|
813 814 815 816 817 818 819 820 821 822 823 824 825 826 |
WARN_ON(1); return 0; } iterate_and_advance(i, bytes, v, __copy_from_user_flushcache((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) ) return bytes; } |
6a37e9400
|
827 |
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); |
0aed55af8
|
828 |
#endif |
aa28de275
|
829 |
/*
 * All-or-nothing variant of _copy_from_iter_nocache(): true and advanced
 * only if every byte copied; false with the iterator unadvanced otherwise.
 */
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
cbbd26b8b
|
852 |
|
72e809ed8
|
853 854 |
/*
 * Sanity-check that offset+n stays within @page (or within its compound
 * page).  Fast path avoids touching the head page for order-0 requests.
 * WARNs and returns false on overflow.
 */
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}
cbbd26b8b
|
876 |
|
62a8067a7
|
877 878 879 |
/*
 * Copy @bytes of @page at @offset into the iterator, dispatching on the
 * iterator flavour: kmap + copy_to_iter for kvec/bvec, the zero-copy pipe
 * path for pipes, byte-count accounting only for ITER_DISCARD, and the
 * iovec fast/slow path otherwise.
 */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i)))
		return bytes;
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

/*
 * Copy @bytes from the iterator into @page at @offset.  Pipe and discard
 * iterators are not valid data sources and are rejected with a WARN.
 */
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
241699cd7
|
914 915 916 |
/*
 * Zero-fill @bytes of a pipe-backed iterator, allocating pipe pages via
 * push_pipe() as needed.  Returns bytes zeroed (possibly short).
 */
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}
c35e02480
|
938 939 |
/*
 * Zero-fill @bytes of the iterator and advance it: clear_user for user
 * iovecs, memzero_page for bvecs, memset for kvecs, pipe_zero for pipes.
 */
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
62a8067a7
|
951 952 953 |
/*
 * Copy @bytes from the iterator into @page at @offset without advancing
 * the iterator, entirely under kmap_atomic (callers are in atomic
 * context, e.g. write_begin/write_end paths).  Pipe/discard iterators are
 * rejected.  kmap_atomic is taken before the sanity checks so that every
 * exit path can unmap unconditionally.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
b9dc6f65b
|
974 975 976 |
/*
 * pipe_truncate - discard every pipe buffer past the iterator's current
 * position.
 *
 * If the iterator sits mid-buffer (i->iov_offset != 0), that buffer is
 * trimmed so its length ends exactly at the iterator's offset and is
 * kept; everything after it is released and the pipe head is pulled
 * back accordingly.  Indexing into pipe->bufs[] is done modulo the
 * (power-of-two) ring size via p_mask.
 */
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			/* keep the partially-filled buffer, trimmed to 'off' */
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		/* walk the head backwards, releasing everything past i_head */
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}
241699cd7
|
999 1000 1001 |
/*
 * pipe_advance - advance a pipe-backed iterator by @size bytes (clamped
 * to the remaining count), then truncate everything past the new
 * position.
 *
 * The walk converts the current offset into a length relative to the
 * start of the current buffer, then steps buffer by buffer until the
 * remaining distance fits inside one buffer.
 */
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}
62a8067a7
|
1026 1027 |
/*
 * iov_iter_advance - move the iterator forward by @size bytes.
 *
 * Pipes get the dedicated pipe_advance(); discard iterators only need
 * their count decremented; everything else uses iterate_and_advance()
 * with no-op per-segment steps (0, 0, 0), relying on the macro's own
 * segment bookkeeping to update iov/bvec/kvec position.
 */
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
27c0e3748
|
1039 1040 1041 1042 |
/*
 * iov_iter_revert - undo a previous advance of @unroll bytes, walking
 * the iterator's segments backwards.
 *
 * For pipes the walk steps back buffer by buffer, bounded by
 * i->start_head (the position the iterator was created at), and
 * finishes with pipe_truncate() to release anything past the restored
 * position.  For discard iterators only the count matters.  For
 * iovec/kvec/bvec the current partial segment absorbs what it can and
 * the rest is unwound one whole segment at a time, growing nr_segs.
 *
 * An unroll larger than MAX_RW_COUNT is treated as a caller bug
 * (WARN + no-op).
 */
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;	/* bytes consumed in this buffer */
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				/* fully rewound to the initial position */
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		/* stays within the current segment */
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else {
		/* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
62a8067a7
|
1106 1107 1108 1109 1110 |
/* * Return the count of just the current iov_iter segment. */ size_t iov_iter_single_seg_count(const struct iov_iter *i) { |
00e237074
|
1111 |
if (unlikely(iov_iter_is_pipe(i))) |
241699cd7
|
1112 |
return i->count; // it is a silly place, anyway |
62a8067a7
|
1113 1114 |
if (i->nr_segs == 1) return i->count; |
9ea9ce042
|
1115 1116 |
if (unlikely(iov_iter_is_discard(i))) return i->count; |
00e237074
|
1117 |
else if (iov_iter_is_bvec(i)) |
62a8067a7
|
1118 |
return min(i->count, i->bvec->bv_len - i->iov_offset); |
ad0eab929
|
1119 1120 |
else return min(i->count, i->iov->iov_len - i->iov_offset); |
62a8067a7
|
1121 1122 |
} EXPORT_SYMBOL(iov_iter_single_seg_count); |
aa563d7bc
|
1123 |
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, |
05afcb77e
|
1124 |
const struct kvec *kvec, unsigned long nr_segs, |
abb78f875
|
1125 1126 |
size_t count) { |
aa563d7bc
|
1127 1128 |
WARN_ON(direction & ~(READ | WRITE)); i->type = ITER_KVEC | (direction & (READ | WRITE)); |
05afcb77e
|
1129 |
i->kvec = kvec; |
abb78f875
|
1130 1131 1132 1133 1134 |
i->nr_segs = nr_segs; i->iov_offset = 0; i->count = count; } EXPORT_SYMBOL(iov_iter_kvec); |
aa563d7bc
|
1135 |
/*
 * iov_iter_bvec - initialise @i as a bio_vec iterator over @nr_segs
 * page vectors totalling @count bytes.  @direction must be READ or
 * WRITE; any other bits trip the WARN_ON.
 */
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	/* only the data-direction bit may be supplied by the caller */
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_bvec);
aa563d7bc
|
1147 |
/*
 * iov_iter_pipe - initialise @i as a pipe-backed iterator of @count
 * bytes over @pipe.  Only READ is supported (BUG otherwise), and the
 * pipe is expected to have free slots (WARN if already full).
 *
 * start_head records the initial head so iov_iter_revert() knows how
 * far back it may unwind.
 */
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);
9ea9ce042
|
1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 |
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_discard);
62a8067a7
|
1178 1179 |
/*
 * iov_iter_alignment - return an alignment mask for the iterator: the
 * OR of the remaining length with every segment's address/offset and
 * length, so callers can test the worst-case misalignment with a
 * single mask.
 *
 * For pipes only the current in-buffer offset can misalign things, and
 * only if the current slot actually has an allocated() buffer.
 */
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
357f435d8
|
1196 1197 |
/*
 * iov_iter_gap_alignment - return an alignment mask for the "gaps"
 * between segments: each segment after the first contributes its start
 * address, and every segment except the last contributes its length
 * (the `size != v.*_len` test is only false for the final segment).
 * Pipe and discard iterators are not meaningful here (WARN, ~0U).
 */
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
e76b63123
|
1216 |
/*
 * __pipe_get_pages - grab references on the pipe buffers' pages that
 * back the next @maxsize bytes, storing them in @pages.
 *
 * push_pipe() allocates/extends the pipe buffers and tells us how many
 * bytes are actually available (n) and the offset into the first page
 * (*start).  The loop then takes one page reference per PAGE_SIZE of
 * n + *start.  Returns the usable byte count, or -EFAULT if no space
 * could be produced.
 */
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

/*
 * pipe_get_pages - pipe flavour of iov_iter_get_pages(): bound the
 * request by both @maxpages and the pipe's free space, then hand off
 * to __pipe_get_pages().
 */
static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
62a8067a7
|
1257 |
/*
 * iov_iter_get_pages - pin up to @maxpages pages covering the start of
 * the iterator's data, storing them in @pages and the offset into the
 * first page in *@start.
 *
 * Only the FIRST segment is examined (the iterate_all_kinds() steps
 * return directly from inside the macro): userspace iovecs go through
 * get_user_pages_fast(), a bvec contributes its single page, and
 * kvec/discard yield -EFAULT.  The iterator itself is not advanced.
 *
 * Returns the number of bytes covered by the pinned pages, or a
 * negative errno.
 */
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		/* writing INTO the pages when the iter is the data source */
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
1b17f1f2e
|
1295 1296 |
/* Allocate an array of n page pointers; pair with kvfree(). */
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
241699cd7
|
1299 1300 1301 1302 1303 |
/*
 * pipe_get_pages_alloc - pipe flavour of iov_iter_get_pages_alloc():
 * size the page array from the pipe's free space (shrinking npages when
 * @maxsize needs fewer pages), allocate it, and fill it via
 * __pipe_get_pages().  The array is freed again if no pages were
 * obtained.
 */
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}
62a8067a7
|
1329 1330 1331 1332 |
/*
 * iov_iter_get_pages_alloc - like iov_iter_get_pages(), but allocates
 * the page-pointer array itself (caller kvfree()s it on success).
 *
 * As with iov_iter_get_pages(), only the first segment is used and the
 * iterator is not advanced; the array is freed on failure paths before
 * returning the error.
 */
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
a604ec7e9
|
1375 1376 1377 1378 1379 1380 |
/*
 * csum_and_copy_from_iter - copy @bytes from @i into @addr while
 * folding the data into the running checksum *@csum, advancing @i.
 *
 * Userspace iovecs use csum_and_copy_from_user(); a faulting chunk
 * stops the walk early (the `err ? v.iov_len : 0` step tells
 * iterate_and_advance() how much was NOT consumed).  bvec and kvec
 * segments use csum_and_memcpy(), with @off tracking the byte position
 * for csum_block_add()'s odd/even folding.
 *
 * Pipe and discard iterators are rejected (WARN + 0).
 */
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
cbbd26b8b
|
1414 1415 1416 1417 1418 1419 1420 |
/*
 * csum_and_copy_from_iter_full - all-or-nothing variant of
 * csum_and_copy_from_iter(): either the full @bytes are copied and
 * checksummed (then the iterator is advanced and true returned), or
 * false is returned and the iterator is left unadvanced.
 *
 * Uses iterate_all_kinds() (non-advancing) so a mid-copy fault can bail
 * out with false without having consumed anything.
 */
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
cb002d074
|
1456 |
/*
 * csum_and_copy_to_iter - copy @bytes from @addr into @i while folding
 * the data into the running checksum *@csump, advancing @i.
 *
 * The mirror image of csum_and_copy_from_iter(): pipes go through
 * csum_and_copy_to_pipe_iter(), discard is rejected (WARN + 0), and
 * iovec/bvec/kvec segments use csum_and_copy_to_user() /
 * csum_and_memcpy() with @off driving csum_block_add()'s folding.
 */
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum *csum = csump;
	__wsum sum, next;
	size_t off = 0;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);

	sum = *csum;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
d05f44355
|
1499 1500 1501 |
/*
 * hash_and_copy_to_iter - copy @bytes from @addr into @i, then feed
 * the bytes actually copied into the ahash request @hashp via a
 * one-entry scatterlist.  Without CONFIG_CRYPTO this is a stub that
 * returns 0.
 *
 * Returns the number of bytes copied (return of crypto_ahash_update()
 * is ignored here).
 */
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
62a8067a7
|
1517 1518 |
/*
 * iov_iter_npages - estimate how many pages the iterator's remaining
 * data spans, capped at @maxpages.
 *
 * Discard iterators cover no pages; pipes report the free slots past
 * the current position (via pipe_space_for_user()); iovec/kvec count
 * the page range each segment's [base, base+len) straddles; each bvec
 * counts as one page.  The early `return maxpages` exits fire from
 * inside the iterate_all_kinds() steps once the cap is hit.
 */
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
4b8164b91
|
1560 1561 1562 1563 |
/*
 * dup_iter - duplicate @old into @new, deep-copying the segment array
 * (bvec or iovec/kvec, which share layout) with kmemdup().
 *
 * Pipe iterators cannot be duplicated (WARN + NULL); discard iterators
 * have no segment array, so the struct copy alone suffices and NULL is
 * returned.  Otherwise returns the freshly allocated segment array
 * (NULL on allocation failure) — the caller owns and must free it.
 */
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
bc917be81
|
1581 |
|
ffecee4f2
|
1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 |
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		/* free only if rw_copy_check_uvector() had to allocate */
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	/* NULL signals "the caller's fast array was used, nothing to free" */
	*iov = p == *iov ? NULL : p;
	return n;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
87e5e6dab
|
1626 1627 1628 1629 |
/*
 * compat_import_iovec - compat (32-bit iovec layout) counterpart of
 * import_iovec(); same allocation/*iov contract, using
 * compat_rw_copy_check_uvector() for validation and conversion.
 */
ssize_t compat_import_iovec(int type,
		const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		/* free only if the checker had to allocate a bigger array */
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	/* NULL signals "the caller's fast array was used, nothing to free" */
	*iov = p == *iov ? NULL : p;
	return n;
}
EXPORT_SYMBOL(compat_import_iovec);
#endif

/*
 * import_single_range - build a one-segment iovec over the user range
 * [buf, buf+len) (clamped to MAX_RW_COUNT) and initialise @i over it.
 * Returns 0 on success or -EFAULT if the range fails access_ok().
 */
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
09cf698a5
|
1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 |
/*
 * iov_iter_for_each_range - invoke @f on each kernel-addressable chunk
 * of the first @bytes of @i, without advancing the iterator.
 *
 * bvec pages are temporarily mapped with kmap()/kunmap() and presented
 * to @f as a kvec; kvec segments are passed through directly; userspace
 * iovecs are unsupported (the iovec step is the constant -EINVAL).
 * Returns 0 for an empty request, otherwise the last callback's return
 * value (iteration stops early when a step yields non-zero, per
 * iterate_all_kinds() semantics).
 */
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);