Commit 01f2705daf5a36208e69d7cf95db9c330f843af6

Authored by Nate Diller
Committed by Linus Torvalds
1 parent 38a23e311b

fs: convert core functions to zero_user_page

It's very common for file systems to need to zero part or all of a page,
the simplest way is just to use kmap_atomic() and memset().  There's
actually a library function in include/linux/highmem.h that does exactly
that, but it's confusingly named memclear_highpage_flush(), which is
descriptive of *how* it does the work rather than what the *purpose* is.
So this patchset renames the function to zero_user_page(), and calls it
from the various places that currently open code it.

This first patch introduces the new function call, and converts all the
core kernel callsites, both the open-coded ones and the old
memclear_highpage_flush() ones.  Following this patch is a series of
conversions for each file system individually, per AKPM, and finally a
patch deprecating the old call.  The diffstat below shows the entire
patchset.

[akpm@linux-foundation.org: fix a few things]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 7 changed files with 42 additions and 81 deletions Side-by-side Diff

drivers/block/loop.c
... ... @@ -243,17 +243,13 @@
243 243 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
244 244 bvec->bv_page, bv_offs, size, IV);
245 245 if (unlikely(transfer_result)) {
246   - char *kaddr;
247   -
248 246 /*
249 247 * The transfer failed, but we still write the data to
250 248 * keep prepare/commit calls balanced.
251 249 */
252 250 printk(KERN_ERR "loop: transfer error block %llu\n",
253 251 (unsigned long long)index);
254   - kaddr = kmap_atomic(page, KM_USER0);
255   - memset(kaddr + offset, 0, size);
256   - kunmap_atomic(kaddr, KM_USER0);
  252 + zero_user_page(page, offset, size, KM_USER0);
257 253 }
258 254 flush_dcache_page(page);
259 255 ret = aops->commit_write(file, page, offset,
... ... @@ -1846,13 +1846,8 @@
1846 1846 if (block_start >= to)
1847 1847 break;
1848 1848 if (buffer_new(bh)) {
1849   - void *kaddr;
1850   -
1851 1849 clear_buffer_new(bh);
1852   - kaddr = kmap_atomic(page, KM_USER0);
1853   - memset(kaddr+block_start, 0, bh->b_size);
1854   - flush_dcache_page(page);
1855   - kunmap_atomic(kaddr, KM_USER0);
  1850 + zero_user_page(page, block_start, bh->b_size, KM_USER0);
1856 1851 set_buffer_uptodate(bh);
1857 1852 mark_buffer_dirty(bh);
1858 1853 }
... ... @@ -1940,10 +1935,8 @@
1940 1935 SetPageError(page);
1941 1936 }
1942 1937 if (!buffer_mapped(bh)) {
1943   - void *kaddr = kmap_atomic(page, KM_USER0);
1944   - memset(kaddr + i * blocksize, 0, blocksize);
1945   - flush_dcache_page(page);
1946   - kunmap_atomic(kaddr, KM_USER0);
  1938 + zero_user_page(page, i * blocksize, blocksize,
  1939 + KM_USER0);
1947 1940 if (!err)
1948 1941 set_buffer_uptodate(bh);
1949 1942 continue;
... ... @@ -2086,7 +2079,6 @@
2086 2079 long status;
2087 2080 unsigned zerofrom;
2088 2081 unsigned blocksize = 1 << inode->i_blkbits;
2089   - void *kaddr;
2090 2082  
2091 2083 while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2092 2084 status = -ENOMEM;
... ... @@ -2108,10 +2100,8 @@
2108 2100 PAGE_CACHE_SIZE, get_block);
2109 2101 if (status)
2110 2102 goto out_unmap;
2111   - kaddr = kmap_atomic(new_page, KM_USER0);
2112   - memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2113   - flush_dcache_page(new_page);
2114   - kunmap_atomic(kaddr, KM_USER0);
  2103 + zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
  2104 + KM_USER0);
2115 2105 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2116 2106 unlock_page(new_page);
2117 2107 page_cache_release(new_page);
... ... @@ -2138,10 +2128,7 @@
2138 2128 if (status)
2139 2129 goto out1;
2140 2130 if (zerofrom < offset) {
2141   - kaddr = kmap_atomic(page, KM_USER0);
2142   - memset(kaddr+zerofrom, 0, offset-zerofrom);
2143   - flush_dcache_page(page);
2144   - kunmap_atomic(kaddr, KM_USER0);
  2131 + zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
2145 2132 __block_commit_write(inode, page, zerofrom, offset);
2146 2133 }
2147 2134 return 0;
... ... @@ -2340,10 +2327,7 @@
2340 2327 * Error recovery is pretty slack. Clear the page and mark it dirty
2341 2328 * so we'll later zero out any blocks which _were_ allocated.
2342 2329 */
2343   - kaddr = kmap_atomic(page, KM_USER0);
2344   - memset(kaddr, 0, PAGE_CACHE_SIZE);
2345   - flush_dcache_page(page);
2346   - kunmap_atomic(kaddr, KM_USER0);
  2330 + zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
2347 2331 SetPageUptodate(page);
2348 2332 set_page_dirty(page);
2349 2333 return ret;
... ... @@ -2382,7 +2366,6 @@
2382 2366 loff_t i_size = i_size_read(inode);
2383 2367 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2384 2368 unsigned offset;
2385   - void *kaddr;
2386 2369 int ret;
2387 2370  
2388 2371 /* Is the page fully inside i_size? */
... ... @@ -2413,10 +2396,7 @@
2413 2396 * the page size, the remaining memory is zeroed when mapped, and
2414 2397 * writes to that region are not written out to the file."
2415 2398 */
2416   - kaddr = kmap_atomic(page, KM_USER0);
2417   - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2418   - flush_dcache_page(page);
2419   - kunmap_atomic(kaddr, KM_USER0);
  2399 + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
2420 2400 out:
2421 2401 ret = mpage_writepage(page, get_block, wbc);
2422 2402 if (ret == -EAGAIN)
... ... @@ -2437,7 +2417,6 @@
2437 2417 unsigned to;
2438 2418 struct page *page;
2439 2419 const struct address_space_operations *a_ops = mapping->a_ops;
2440   - char *kaddr;
2441 2420 int ret = 0;
2442 2421  
2443 2422 if ((offset & (blocksize - 1)) == 0)
... ... @@ -2451,10 +2430,8 @@
2451 2430 to = (offset + blocksize) & ~(blocksize - 1);
2452 2431 ret = a_ops->prepare_write(NULL, page, offset, to);
2453 2432 if (ret == 0) {
2454   - kaddr = kmap_atomic(page, KM_USER0);
2455   - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2456   - flush_dcache_page(page);
2457   - kunmap_atomic(kaddr, KM_USER0);
  2433 + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
  2434 + KM_USER0);
2458 2435 /*
2459 2436 * It would be more correct to call aops->commit_write()
2460 2437 * here, but this is more efficient.
... ... @@ -2480,7 +2457,6 @@
2480 2457 struct inode *inode = mapping->host;
2481 2458 struct page *page;
2482 2459 struct buffer_head *bh;
2483   - void *kaddr;
2484 2460 int err;
2485 2461  
2486 2462 blocksize = 1 << inode->i_blkbits;
... ... @@ -2534,11 +2510,7 @@
2534 2510 goto unlock;
2535 2511 }
2536 2512  
2537   - kaddr = kmap_atomic(page, KM_USER0);
2538   - memset(kaddr + offset, 0, length);
2539   - flush_dcache_page(page);
2540   - kunmap_atomic(kaddr, KM_USER0);
2541   -
  2513 + zero_user_page(page, offset, length, KM_USER0);
2542 2514 mark_buffer_dirty(bh);
2543 2515 err = 0;
2544 2516  
... ... @@ -2559,7 +2531,6 @@
2559 2531 loff_t i_size = i_size_read(inode);
2560 2532 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2561 2533 unsigned offset;
2562   - void *kaddr;
2563 2534  
2564 2535 /* Is the page fully inside i_size? */
2565 2536 if (page->index < end_index)
... ... @@ -2585,10 +2556,7 @@
2585 2556 * the page size, the remaining memory is zeroed when mapped, and
2586 2557 * writes to that region are not written out to the file."
2587 2558 */
2588   - kaddr = kmap_atomic(page, KM_USER0);
2589   - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2590   - flush_dcache_page(page);
2591   - kunmap_atomic(kaddr, KM_USER0);
  2559 + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
2592 2560 return __block_write_full_page(inode, page, get_block, wbc);
2593 2561 }
2594 2562  
... ... @@ -867,7 +867,6 @@
867 867 do_holes:
868 868 /* Handle holes */
869 869 if (!buffer_mapped(map_bh)) {
870   - char *kaddr;
871 870 loff_t i_size_aligned;
872 871  
873 872 /* AKPM: eargh, -ENOTBLK is a hack */
... ... @@ -888,11 +887,8 @@
888 887 page_cache_release(page);
889 888 goto out;
890 889 }
891   - kaddr = kmap_atomic(page, KM_USER0);
892   - memset(kaddr + (block_in_page << blkbits),
893   - 0, 1 << blkbits);
894   - flush_dcache_page(page);
895   - kunmap_atomic(kaddr, KM_USER0);
  890 + zero_user_page(page, block_in_page << blkbits,
  891 + 1 << blkbits, KM_USER0);
896 892 dio->block_in_file++;
897 893 block_in_page++;
898 894 goto next_block;
... ... @@ -284,11 +284,9 @@
284 284 }
285 285  
286 286 if (first_hole != blocks_per_page) {
287   - char *kaddr = kmap_atomic(page, KM_USER0);
288   - memset(kaddr + (first_hole << blkbits), 0,
289   - PAGE_CACHE_SIZE - (first_hole << blkbits));
290   - flush_dcache_page(page);
291   - kunmap_atomic(kaddr, KM_USER0);
  287 + zero_user_page(page, first_hole << blkbits,
  288 + PAGE_CACHE_SIZE - (first_hole << blkbits),
  289 + KM_USER0);
292 290 if (first_hole == 0) {
293 291 SetPageUptodate(page);
294 292 unlock_page(page);
295 293  
... ... @@ -576,14 +574,11 @@
576 574 * written out to the file."
577 575 */
578 576 unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
579   - char *kaddr;
580 577  
581 578 if (page->index > end_index || !offset)
582 579 goto confused;
583   - kaddr = kmap_atomic(page, KM_USER0);
584   - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
585   - flush_dcache_page(page);
586   - kunmap_atomic(kaddr, KM_USER0);
  580 + zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
  581 + KM_USER0);
587 582 }
588 583  
589 584 /*
include/linux/highmem.h
... ... @@ -94,17 +94,27 @@
94 94  
95 95 /*
96 96 * Same but also flushes aliased cache contents to RAM.
  97 + *
  98 + * This must be a macro because KM_USER0 and friends aren't defined if
  99 + * !CONFIG_HIGHMEM
97 100 */
98   -static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
99   -{
100   - void *kaddr;
  101 +#define zero_user_page(page, offset, size, km_type) \
  102 + do { \
  103 + void *kaddr; \
  104 + \
  105 + BUG_ON((offset) + (size) > PAGE_SIZE); \
  106 + \
  107 + kaddr = kmap_atomic(page, km_type); \
  108 + memset((char *)kaddr + (offset), 0, (size)); \
  109 + flush_dcache_page(page); \
  110 + kunmap_atomic(kaddr, (km_type)); \
  111 + } while (0)
101 112  
102   - BUG_ON(offset + size > PAGE_SIZE);
103 113  
104   - kaddr = kmap_atomic(page, KM_USER0);
105   - memset((char *)kaddr + offset, 0, size);
106   - flush_dcache_page(page);
107   - kunmap_atomic(kaddr, KM_USER0);
  114 +static inline void memclear_highpage_flush(struct page *page,
  115 + unsigned int offset, unsigned int size)
  116 +{
  117 + zero_user_page(page, offset, size, KM_USER0);
108 118 }
109 119  
110 120 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
... ... @@ -434,7 +434,6 @@
434 434 unsigned blocksize;
435 435 unsigned length;
436 436 struct page *page;
437   - void *kaddr;
438 437  
439 438 BUG_ON(!mapping->a_ops->get_xip_page);
440 439  
... ... @@ -458,11 +457,7 @@
458 457 else
459 458 return PTR_ERR(page);
460 459 }
461   - kaddr = kmap_atomic(page, KM_USER0);
462   - memset(kaddr + offset, 0, length);
463   - kunmap_atomic(kaddr, KM_USER0);
464   -
465   - flush_dcache_page(page);
  460 + zero_user_page(page, offset, length, KM_USER0);
466 461 return 0;
467 462 }
468 463 EXPORT_SYMBOL_GPL(xip_truncate_page);
... ... @@ -12,6 +12,7 @@
12 12 #include <linux/swap.h>
13 13 #include <linux/module.h>
14 14 #include <linux/pagemap.h>
  15 +#include <linux/highmem.h>
15 16 #include <linux/pagevec.h>
16 17 #include <linux/task_io_accounting_ops.h>
17 18 #include <linux/buffer_head.h> /* grr. try_to_release_page,
... ... @@ -46,7 +47,7 @@
46 47  
47 48 static inline void truncate_partial_page(struct page *page, unsigned partial)
48 49 {
49   - memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
  50 + zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
50 51 if (PagePrivate(page))
51 52 do_invalidatepage(page, partial);
52 53 }