Blame view
mm/filemap.h
2.76 KB
ceffc0785 [PATCH] xip: fs/m... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 |
/* * linux/mm/filemap.h * * Copyright (C) 1994-1999 Linus Torvalds */ #ifndef __FILEMAP_H #define __FILEMAP_H #include <linux/types.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/uio.h> |
c22ce143d [PATCH] x86: cach... |
15 |
#include <linux/uaccess.h> |
ceffc0785 [PATCH] xip: fs/m... |
16 |
|
eb6fe0c38 [PATCH] xip: redu... |
17 |
/*
 * Copy up to "bytes" bytes from the user iovec into "vaddr", starting
 * "base" bytes into the current iovec segment.  Safe to call from atomic
 * context (no faulting kmap, no sleeping).  Returns the number of bytes
 * successfully copied, which may be less than "bytes" if a fault occurs.
 */
size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr, const struct iovec *iov,
					size_t base, size_t bytes);
ceffc0785 [PATCH] xip: fs/m... |
22 23 24 25 26 |
/* * Copy as much as we can into the page and return the number of bytes which * were sucessfully copied. If a fault is encountered then clear the page * out to (offset+bytes) and return the number of bytes which were copied. |
01408c493 [PATCH] Prepare f... |
27 28 29 30 31 32 33 34 |
* * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache * to *NOT* zero any tail of the buffer that it failed to copy. If it does, * and if the following non-atomic copy succeeds, then there is a small window * where the target page contains neither the data before the write, nor the * data after the write (it contains zero). A read at this time will see * data that is inconsistent with any ordering of the read and the write. * (This has been detected in practice). |
ceffc0785 [PATCH] xip: fs/m... |
35 36 37 38 39 40 41 42 43 |
*/ static inline size_t filemap_copy_from_user(struct page *page, unsigned long offset, const char __user *buf, unsigned bytes) { char *kaddr; int left; kaddr = kmap_atomic(page, KM_USER0); |
c22ce143d [PATCH] x86: cach... |
44 |
left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes); |
ceffc0785 [PATCH] xip: fs/m... |
45 46 47 48 49 |
kunmap_atomic(kaddr, KM_USER0); if (left != 0) { /* Do it the slow way */ kaddr = kmap(page); |
c22ce143d [PATCH] x86: cach... |
50 |
left = __copy_from_user_nocache(kaddr + offset, buf, bytes); |
ceffc0785 [PATCH] xip: fs/m... |
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 |
kunmap(page); } return bytes - left; } /* * This has the same sideeffects and return value as filemap_copy_from_user(). * The difference is that on a fault we need to memset the remainder of the * page (out to offset+bytes), to emulate filemap_copy_from_user()'s * single-segment behaviour. */ static inline size_t filemap_copy_from_user_iovec(struct page *page, unsigned long offset, const struct iovec *iov, size_t base, size_t bytes) { char *kaddr; size_t copied; kaddr = kmap_atomic(page, KM_USER0); |
01408c493 [PATCH] Prepare f... |
70 71 |
copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov, base, bytes); |
ceffc0785 [PATCH] xip: fs/m... |
72 73 74 |
kunmap_atomic(kaddr, KM_USER0); if (copied != bytes) { kaddr = kmap(page); |
01408c493 [PATCH] Prepare f... |
75 76 77 78 |
copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov, base, bytes); if (bytes - copied) memset(kaddr + offset + copied, 0, bytes - copied); |
ceffc0785 [PATCH] xip: fs/m... |
79 80 81 82 83 84 85 86 87 88 |
kunmap(page); } return copied; } static inline void filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes) { const struct iovec *iov = *iovp; size_t base = *basep; |
81b0c8713 [PATCH] generic_f... |
89 |
do { |
ceffc0785 [PATCH] xip: fs/m... |
90 91 92 93 94 95 96 97 |
int copy = min(bytes, iov->iov_len - base); bytes -= copy; base += copy; if (iov->iov_len == base) { iov++; base = 0; } |
81b0c8713 [PATCH] generic_f... |
98 |
} while (bytes); |
ceffc0785 [PATCH] xip: fs/m... |
99 100 101 102 |
*iovp = iov; *basep = base; } #endif |