lib/iov_iter.c
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

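/*
 * A note on the iterate_* helpers below: each walks one segment flavour,
 * evaluating STEP once per non-empty chunk with __v describing that chunk.
 * In the iovec flavour the value of STEP is the number of bytes that could
 * *not* be processed, so a short copy to or from userspace terminates the
 * walk; the kvec and bvec flavours discard the value, since kernel memory
 * does not fault.
 */
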
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

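/*
 * Of the two dispatch macros above, iterate_all_kinds() leaves the
 * iterator untouched, while iterate_and_advance() first clips n to
 * i->count and then consumes what it walked: count, iov_offset, nr_segs
 * and the segment pointer are all updated to point past the data.
 */
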
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

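/*
 * A minimal sketch (not part of this file) of the caller pattern this
 * helper exists for, modelled on generic_perform_write(): pre-fault the
 * user pages, then do the real copy with page faults disabled, retrying
 * on a short copy.  page, offset and bytes stand in for the caller's state.
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	...
 *	iov_iter_advance(i, copied);
 */
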
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

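/*
 * Example (a sketch, not part of this file): wrapping a single user buffer
 * for a read, as the new_sync_read()-style paths do.  ubuf and len stand
 * in for the caller's arguments.
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 *
 * For a checked version of the same thing, see import_single_range() at
 * the bottom of this file.
 */
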
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

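/*
 * Callers normally reach this through the copy_to_iter() wrapper in
 * <linux/uio.h>, which adds the hardened usercopy size check before
 * calling _copy_to_iter().  A sketch of a typical use (kbuf and kcount
 * are placeholders):
 *
 *	if (copy_to_iter(kbuf, kcount, iter) != kcount)
 *		return -EFAULT;	// short copy: an iovec page faulted
 */
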
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}

static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off, xfer = 0;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
				chunk);
		i->idx = idx;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and the typical
 * _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		({
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
				v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	if (iter_is_iovec(i))
		might_fault();
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types. The _copy_from_iter_nocache() only attempts
 * to bypass the cache for the ITER_IOVEC case, and on some archs may
 * use instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(_copy_from_iter_full_nocache);

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head = compound_head(page);
	size_t v = n + offset + page_address(page) - page_address(head);

	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		int idx = i->idx;
		size_t off = i->iov_offset;
		while (1) {
			size_t n = off - pipe->bufs[idx].offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && idx == i->start_idx) {
				off = 0;
				break;
			}
			if (!idx--)
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		}
		i->iov_offset = off;
		i->idx = idx;
		pipe_truncate(i);
		return;
	}
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (i->type & ITER_BVEC) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

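/*
 * Example (a sketch): kernel-resident I/O through a kvec-backed iterator,
 * in the spirit of the kernel_read()/kernel_write() paths.  buf and len
 * are placeholders; note that in this version the constructor insists on
 * ITER_KVEC being or'ed into the direction, per the BUG_ON() above.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 */
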
void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

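/*
 * Typical use (a sketch, modelled on the direct-I/O paths): reject a
 * request whose segment addresses or lengths are not block-aligned,
 * blocksize_mask being the device's logical block size minus one.
 *
 *	if ((pos | iov_iter_alignment(iter)) & blocksize_mask)
 *		return -EINVAL;
 */
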
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

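/*
 * Example (a sketch): pinning the front of an iterator for zero-copy use.
 * As the code above shows, only the first segment is considered, at most
 * maxpages pages are returned, *start is the offset into the first page,
 * and a kvec-backed iterator yields -EFAULT.
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages(iter, pages, iov_iter_count(iter), 16, &start);
 *	if (n < 0)
 *		return n;
 *	// ... use the n bytes described, put_page() each page when done,
 *	// and iov_iter_advance(iter, n) to consume them.
 */
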
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	ssize_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

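/*
 * Example (a sketch of the usual syscall pattern, cf. do_readv()): a small
 * on-stack array that import_iovec() swaps for a heap allocation only when
 * nr_segs does not fit.  Thanks to the contract documented above, the
 * final kfree() is correct whether or not the stack array was used.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... do the I/O through &iter ...
 *	kfree(iov);
 */
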
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context)
{
	struct kvec w;
	int err = -EINVAL;
	if (!bytes)
		return 0;

	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		kunmap(v.bv_page);
		err;}), ({
		w = v;
		err = f(&w, context);})
	)
	return err;
}
EXPORT_SYMBOL(iov_iter_for_each_range);
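
/*
 * Example (a sketch): visiting an iterator's data in place through the
 * callback above.  A bvec segment arrives kmap()ed for the duration of the
 * call, a kvec segment is passed through as-is, and a plain iovec iterator
 * produces -EINVAL, as the expansion above shows.
 *
 *	static int crc_range(struct kvec *vec, void *context)
 *	{
 *		u32 *crc = context;
 *		*crc = crc32c(*crc, vec->iov_base, vec->iov_len);
 *		return 0;
 *	}
 *
 *	u32 crc = 0;
 *	int err = iov_iter_for_each_range(iter, bytes, crc_range, &crc);
 */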