Commit eebd2aa355692afaf9906f62118620f1a1c19dbb

Authored by Christoph Lameter
Committed by Linus Torvalds
1 parent b98348bdd0

Pagecache zeroing: zero_user_segment, zero_user_segments and zero_user

Simplify page cache zeroing of segments of pages through three functions:

zero_user_segments(page, start1, end1, start2, end2)

        Zeros two segments of the page. It takes the positions where the
        zeroing starts and ends, which avoids length calculations and
        makes the code clearer.

zero_user_segment(page, start, end)

        Same for a single segment.

zero_user(page, start, length)

        Length variant for the case where we know the length.
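
For illustration only (a sketch, not part of the patch), a typical caller
conversion looks like this; block_start, block_end, from and to are the
usual locals of the fs/buffer.c hunk further down:

	/* Before: open-coded two-segment zeroing with an explicit kmap slot. */
	kaddr = kmap_atomic(page, KM_USER0);
	if (block_end > to)
		memset(kaddr + to, 0, block_end - to);
	if (block_start < from)
		memset(kaddr + block_start, 0, from - block_start);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	/* After: pass the start/end positions of both segments directly. */
	zero_user_segments(page, to, block_end, block_start, from);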

We remove the zero_user_page macro. Issues:

1. It's a macro. Inline functions are preferable.

2. The KM_USER0 macro is only defined for HIGHMEM.

   Having to treat this special case everywhere makes the
   code needlessly complex. The parameter for zeroing is always
   KM_USER0 except in a single case, which we open code.

Avoiding KM_USER0 means a lot of code no longer has to deal with
the special casing for HIGHMEM. Dealing with kmap is only necessary
for HIGHMEM configurations; in those configurations we use KM_USER0,
as we do for a series of other functions defined in highmem.h.

Since KM_USER0 depends on HIGHMEM, the existing zero_user_page had
to be a macro rather than an inline function. The zero_user_*
functions introduced here can be inline because that constant is
not used when these functions are called.
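
To make that concrete, here is a rough sketch (from memory; exact
definitions vary between trees) of why the constant never reaches the
compiler when these are macros:

	/* Roughly what highmem.h provides when !CONFIG_HIGHMEM: the km_type
	 * argument is discarded during macro expansion. */
	#define kmap_atomic(page, idx)		page_address(page)
	#define kunmap_atomic(addr, idx)	do { } while (0)

	/* Old interface: the caller names KM_USER0, which only compiles
	 * because zero_user_page() was itself a macro. */
	zero_user_page(page, offset, len, KM_USER0);

	/* New interface: no kmap slot at the call site, so an inline works. */
	zero_user(page, offset, len);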

Also move the flushing of the caches outside of the kmap.
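
The resulting ordering (see the include/linux/highmem.h hunk near the end
of the diff) is, in sketch form:

	/* Old macro body: flush while the atomic mapping is still live. */
	memset((char *)kaddr + offset, 0, size);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	/* New inline helper: drop the mapping first, then flush. */
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);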

[akpm@linux-foundation.org: fix nfs and ntfs build]
[akpm@linux-foundation.org: fix ntfs build some more]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Steven French <sfrench@us.ibm.com>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: <linux-ext4@vger.kernel.org>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: "J. Bruce Fields" <bfields@fieldses.org>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: David Chinner <dgc@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 22 changed files with 103 additions and 116 deletions

... ... @@ -1798,7 +1798,7 @@
1798 1798 start = max(from, block_start);
1799 1799 size = min(to, block_end) - start;
1800 1800  
1801   - zero_user_page(page, start, size, KM_USER0);
  1801 + zero_user(page, start, size);
1802 1802 set_buffer_uptodate(bh);
1803 1803 }
1804 1804  
... ... @@ -1861,19 +1861,10 @@
1861 1861 mark_buffer_dirty(bh);
1862 1862 continue;
1863 1863 }
1864   - if (block_end > to || block_start < from) {
1865   - void *kaddr;
1866   -
1867   - kaddr = kmap_atomic(page, KM_USER0);
1868   - if (block_end > to)
1869   - memset(kaddr+to, 0,
1870   - block_end-to);
1871   - if (block_start < from)
1872   - memset(kaddr+block_start,
1873   - 0, from-block_start);
1874   - flush_dcache_page(page);
1875   - kunmap_atomic(kaddr, KM_USER0);
1876   - }
  1864 + if (block_end > to || block_start < from)
  1865 + zero_user_segments(page,
  1866 + to, block_end,
  1867 + block_start, from);
1877 1868 continue;
1878 1869 }
1879 1870 }
... ... @@ -2104,8 +2095,7 @@
2104 2095 SetPageError(page);
2105 2096 }
2106 2097 if (!buffer_mapped(bh)) {
2107   - zero_user_page(page, i * blocksize, blocksize,
2108   - KM_USER0);
  2098 + zero_user(page, i * blocksize, blocksize);
2109 2099 if (!err)
2110 2100 set_buffer_uptodate(bh);
2111 2101 continue;
... ... @@ -2218,7 +2208,7 @@
2218 2208 &page, &fsdata);
2219 2209 if (err)
2220 2210 goto out;
2221   - zero_user_page(page, zerofrom, len, KM_USER0);
  2211 + zero_user(page, zerofrom, len);
2222 2212 err = pagecache_write_end(file, mapping, curpos, len, len,
2223 2213 page, fsdata);
2224 2214 if (err < 0)
... ... @@ -2245,7 +2235,7 @@
2245 2235 &page, &fsdata);
2246 2236 if (err)
2247 2237 goto out;
2248   - zero_user_page(page, zerofrom, len, KM_USER0);
  2238 + zero_user(page, zerofrom, len);
2249 2239 err = pagecache_write_end(file, mapping, curpos, len, len,
2250 2240 page, fsdata);
2251 2241 if (err < 0)
... ... @@ -2422,7 +2412,6 @@
2422 2412 unsigned block_in_page;
2423 2413 unsigned block_start, block_end;
2424 2414 sector_t block_in_file;
2425   - char *kaddr;
2426 2415 int nr_reads = 0;
2427 2416 int ret = 0;
2428 2417 int is_mapped_to_disk = 1;
... ... @@ -2493,13 +2482,8 @@
2493 2482 continue;
2494 2483 }
2495 2484 if (buffer_new(bh) || !buffer_mapped(bh)) {
2496   - kaddr = kmap_atomic(page, KM_USER0);
2497   - if (block_start < from)
2498   - memset(kaddr+block_start, 0, from-block_start);
2499   - if (block_end > to)
2500   - memset(kaddr + to, 0, block_end - to);
2501   - flush_dcache_page(page);
2502   - kunmap_atomic(kaddr, KM_USER0);
  2485 + zero_user_segments(page, block_start, from,
  2486 + to, block_end);
2503 2487 continue;
2504 2488 }
2505 2489 if (buffer_uptodate(bh))
... ... @@ -2636,7 +2620,7 @@
2636 2620 * the page size, the remaining memory is zeroed when mapped, and
2637 2621 * writes to that region are not written out to the file."
2638 2622 */
2639   - zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
  2623 + zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2640 2624 out:
2641 2625 ret = mpage_writepage(page, get_block, wbc);
2642 2626 if (ret == -EAGAIN)
... ... @@ -2709,7 +2693,7 @@
2709 2693 if (page_has_buffers(page))
2710 2694 goto has_buffers;
2711 2695 }
2712   - zero_user_page(page, offset, length, KM_USER0);
  2696 + zero_user(page, offset, length);
2713 2697 set_page_dirty(page);
2714 2698 err = 0;
2715 2699  
... ... @@ -2785,7 +2769,7 @@
2785 2769 goto unlock;
2786 2770 }
2787 2771  
2788   - zero_user_page(page, offset, length, KM_USER0);
  2772 + zero_user(page, offset, length);
2789 2773 mark_buffer_dirty(bh);
2790 2774 err = 0;
2791 2775  
... ... @@ -2831,7 +2815,7 @@
2831 2815 * the page size, the remaining memory is zeroed when mapped, and
2832 2816 * writes to that region are not written out to the file."
2833 2817 */
2834   - zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
  2818 + zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2835 2819 return __block_write_full_page(inode, page, get_block, wbc);
2836 2820 }
2837 2821  
... ... @@ -1386,7 +1386,7 @@
1386 1386 if (!page)
1387 1387 return -ENOMEM;
1388 1388  
1389   - zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
  1389 + zero_user_segment(page, offset, PAGE_CACHE_SIZE);
1390 1390 unlock_page(page);
1391 1391 page_cache_release(page);
1392 1392 return rc;
... ... @@ -878,8 +878,8 @@
878 878 page_cache_release(page);
879 879 goto out;
880 880 }
881   - zero_user_page(page, block_in_page << blkbits,
882   - 1 << blkbits, KM_USER0);
  881 + zero_user(page, block_in_page << blkbits,
  882 + 1 << blkbits);
883 883 dio->block_in_file++;
884 884 block_in_page++;
885 885 goto next_block;
... ... @@ -257,8 +257,7 @@
257 257 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
258 258 if (to > end_byte_in_page)
259 259 end_byte_in_page = to;
260   - zero_user_page(page, end_byte_in_page,
261   - PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
  260 + zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
262 261 out:
263 262 return 0;
264 263 }
... ... @@ -307,7 +306,7 @@
307 306 */
308 307 if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
309 308 (from != 0)) {
310   - zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
  309 + zero_user(page, 0, PAGE_CACHE_SIZE);
311 310 }
312 311 out:
313 312 return rc;
... ... @@ -1845,7 +1845,7 @@
1845 1845 */
1846 1846 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1847 1847 ext3_should_writeback_data(inode) && PageUptodate(page)) {
1848   - zero_user_page(page, offset, length, KM_USER0);
  1848 + zero_user(page, offset, length);
1849 1849 set_page_dirty(page);
1850 1850 goto unlock;
1851 1851 }
... ... @@ -1898,7 +1898,7 @@
1898 1898 goto unlock;
1899 1899 }
1900 1900  
1901   - zero_user_page(page, offset, length, KM_USER0);
  1901 + zero_user(page, offset, length);
1902 1902 BUFFER_TRACE(bh, "zeroed end of block");
1903 1903  
1904 1904 err = 0;
... ... @@ -1840,7 +1840,7 @@
1840 1840 */
1841 1841 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1842 1842 ext4_should_writeback_data(inode) && PageUptodate(page)) {
1843   - zero_user_page(page, offset, length, KM_USER0);
  1843 + zero_user(page, offset, length);
1844 1844 set_page_dirty(page);
1845 1845 goto unlock;
1846 1846 }
... ... @@ -1893,7 +1893,7 @@
1893 1893 goto unlock;
1894 1894 }
1895 1895  
1896   - zero_user_page(page, offset, length, KM_USER0);
  1896 + zero_user(page, offset, length);
1897 1897  
1898 1898 BUFFER_TRACE(bh, "zeroed end of block");
1899 1899  
... ... @@ -932,7 +932,7 @@
932 932 if (!gfs2_is_writeback(ip))
933 933 gfs2_trans_add_bh(ip->i_gl, bh, 0);
934 934  
935   - zero_user_page(page, offset, length, KM_USER0);
  935 + zero_user(page, offset, length);
936 936  
937 937 unlock:
938 938 unlock_page(page);
fs/gfs2/ops_address.c
... ... @@ -446,7 +446,7 @@
446 446 * so we need to supply one here. It doesn't happen often.
447 447 */
448 448 if (unlikely(page->index)) {
449   - zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
  449 + zero_user(page, 0, PAGE_CACHE_SIZE);
450 450 return 0;
451 451 }
452 452  
... ... @@ -341,13 +341,10 @@
341 341 unsigned from, unsigned to)
342 342 {
343 343 if (!PageUptodate(page)) {
344   - if (to - from != PAGE_CACHE_SIZE) {
345   - void *kaddr = kmap_atomic(page, KM_USER0);
346   - memset(kaddr, 0, from);
347   - memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
348   - flush_dcache_page(page);
349   - kunmap_atomic(kaddr, KM_USER0);
350   - }
  344 + if (to - from != PAGE_CACHE_SIZE)
  345 + zero_user_segments(page,
  346 + 0, from,
  347 + to, PAGE_CACHE_SIZE);
351 348 }
352 349 return 0;
353 350 }
... ... @@ -276,9 +276,7 @@
276 276 }
277 277  
278 278 if (first_hole != blocks_per_page) {
279   - zero_user_page(page, first_hole << blkbits,
280   - PAGE_CACHE_SIZE - (first_hole << blkbits),
281   - KM_USER0);
  279 + zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
282 280 if (first_hole == 0) {
283 281 SetPageUptodate(page);
284 282 unlock_page(page);
... ... @@ -571,8 +569,7 @@
571 569  
572 570 if (page->index > end_index || !offset)
573 571 goto confused;
574   - zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
575   - KM_USER0);
  572 + zero_user_segment(page, offset, PAGE_CACHE_SIZE);
576 573 }
577 574  
578 575 /*
... ... @@ -79,7 +79,7 @@
79 79 static
80 80 int nfs_return_empty_page(struct page *page)
81 81 {
82   - zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
  82 + zero_user(page, 0, PAGE_CACHE_SIZE);
83 83 SetPageUptodate(page);
84 84 unlock_page(page);
85 85 return 0;
86 86  
... ... @@ -103,10 +103,10 @@
103 103 pglen = PAGE_CACHE_SIZE - base;
104 104 for (;;) {
105 105 if (remainder <= pglen) {
106   - zero_user_page(*pages, base, remainder, KM_USER0);
  106 + zero_user(*pages, base, remainder);
107 107 break;
108 108 }
109   - zero_user_page(*pages, base, pglen, KM_USER0);
  109 + zero_user(*pages, base, pglen);
110 110 pages++;
111 111 remainder -= pglen;
112 112 pglen = PAGE_CACHE_SIZE;
... ... @@ -130,7 +130,7 @@
130 130 return PTR_ERR(new);
131 131 }
132 132 if (len < PAGE_CACHE_SIZE)
133   - zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
  133 + zero_user_segment(page, len, PAGE_CACHE_SIZE);
134 134  
135 135 nfs_list_add_request(new, &one_request);
136 136 if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
... ... @@ -532,7 +532,7 @@
532 532 goto out_error;
533 533  
534 534 if (len < PAGE_CACHE_SIZE)
535   - zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
  535 + zero_user_segment(page, len, PAGE_CACHE_SIZE);
536 536 nfs_pageio_add_request(desc->pgio, new);
537 537 return 0;
538 538 out_error:
... ... @@ -665,9 +665,7 @@
665 665 * then we need to zero any uninitalised data. */
666 666 if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
667 667 && !PageUptodate(req->wb_page))
668   - zero_user_page(req->wb_page, req->wb_bytes,
669   - PAGE_CACHE_SIZE - req->wb_bytes,
670   - KM_USER0);
  668 + zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
671 669 return req;
672 670 }
673 671  
... ... @@ -87,13 +87,17 @@
87 87 /* Check for the current buffer head overflowing. */
88 88 if (unlikely(file_ofs + bh->b_size > init_size)) {
89 89 int ofs;
  90 + void *kaddr;
90 91  
91 92 ofs = 0;
92 93 if (file_ofs < init_size)
93 94 ofs = init_size - file_ofs;
94 95 local_irq_save(flags);
95   - zero_user_page(page, bh_offset(bh) + ofs,
96   - bh->b_size - ofs, KM_BIO_SRC_IRQ);
  96 + kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
  97 + memset(kaddr + bh_offset(bh) + ofs, 0,
  98 + bh->b_size - ofs);
  99 + flush_dcache_page(page);
  100 + kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
97 101 local_irq_restore(flags);
98 102 }
99 103 } else {
... ... @@ -334,7 +338,7 @@
334 338 bh->b_blocknr = -1UL;
335 339 clear_buffer_mapped(bh);
336 340 handle_zblock:
337   - zero_user_page(page, i * blocksize, blocksize, KM_USER0);
  341 + zero_user(page, i * blocksize, blocksize);
338 342 if (likely(!err))
339 343 set_buffer_uptodate(bh);
340 344 } while (i++, iblock++, (bh = bh->b_this_page) != head);
... ... @@ -410,7 +414,7 @@
410 414 /* Is the page fully outside i_size? (truncate in progress) */
411 415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
412 416 PAGE_CACHE_SHIFT)) {
413   - zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
  417 + zero_user(page, 0, PAGE_CACHE_SIZE);
414 418 ntfs_debug("Read outside i_size - truncated?");
415 419 goto done;
416 420 }
... ... @@ -459,7 +463,7 @@
459 463 * ok to ignore the compressed flag here.
460 464 */
461 465 if (unlikely(page->index > 0)) {
462   - zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
  466 + zero_user(page, 0, PAGE_CACHE_SIZE);
463 467 goto done;
464 468 }
465 469 if (!NInoAttr(ni))
... ... @@ -788,8 +792,7 @@
788 792 if (err == -ENOENT || lcn == LCN_ENOENT) {
789 793 bh->b_blocknr = -1;
790 794 clear_buffer_dirty(bh);
791   - zero_user_page(page, bh_offset(bh), blocksize,
792   - KM_USER0);
  795 + zero_user(page, bh_offset(bh), blocksize);
793 796 set_buffer_uptodate(bh);
794 797 err = 0;
795 798 continue;
... ... @@ -1414,8 +1417,7 @@
1414 1417 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1415 1418 /* The page straddles i_size. */
1416 1419 unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1417   - zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
1418   - KM_USER0);
  1420 + zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
1419 1421 }
1420 1422 /* Handle mst protected attributes. */
1421 1423 if (NInoMstProtected(ni))
... ... @@ -565,7 +565,7 @@
565 565 if (xpage >= max_page) {
566 566 kfree(bhs);
567 567 kfree(pages);
568   - zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
  568 + zero_user(page, 0, PAGE_CACHE_SIZE);
569 569 ntfs_debug("Compressed read outside i_size - truncated?");
570 570 SetPageUptodate(page);
571 571 unlock_page(page);
... ... @@ -607,8 +607,8 @@
607 607 ntfs_submit_bh_for_read(bh);
608 608 *wait_bh++ = bh;
609 609 } else {
610   - zero_user_page(page, bh_offset(bh),
611   - blocksize, KM_USER0);
  610 + zero_user(page, bh_offset(bh),
  611 + blocksize);
612 612 set_buffer_uptodate(bh);
613 613 }
614 614 }
... ... @@ -683,9 +683,8 @@
683 683 ntfs_submit_bh_for_read(bh);
684 684 *wait_bh++ = bh;
685 685 } else {
686   - zero_user_page(page,
687   - bh_offset(bh),
688   - blocksize, KM_USER0);
  686 + zero_user(page, bh_offset(bh),
  687 + blocksize);
689 688 set_buffer_uptodate(bh);
690 689 }
691 690 }
... ... @@ -703,8 +702,8 @@
703 702 */
704 703 if (bh_end <= pos || bh_pos >= end) {
705 704 if (!buffer_uptodate(bh)) {
706   - zero_user_page(page, bh_offset(bh),
707   - blocksize, KM_USER0);
  705 + zero_user(page, bh_offset(bh),
  706 + blocksize);
708 707 set_buffer_uptodate(bh);
709 708 }
710 709 mark_buffer_dirty(bh);
... ... @@ -743,8 +742,7 @@
743 742 if (!buffer_uptodate(bh))
744 743 set_buffer_uptodate(bh);
745 744 } else if (!buffer_uptodate(bh)) {
746   - zero_user_page(page, bh_offset(bh), blocksize,
747   - KM_USER0);
  745 + zero_user(page, bh_offset(bh), blocksize);
748 746 set_buffer_uptodate(bh);
749 747 }
750 748 continue;
... ... @@ -868,8 +866,8 @@
868 866 if (!buffer_uptodate(bh))
869 867 set_buffer_uptodate(bh);
870 868 } else if (!buffer_uptodate(bh)) {
871   - zero_user_page(page, bh_offset(bh),
872   - blocksize, KM_USER0);
  869 + zero_user(page, bh_offset(bh),
  870 + blocksize);
873 871 set_buffer_uptodate(bh);
874 872 }
875 873 continue;
... ... @@ -1128,8 +1126,8 @@
1128 1126  
1129 1127 if (likely(bh_pos < initialized_size))
1130 1128 ofs = initialized_size - bh_pos;
1131   - zero_user_page(page, bh_offset(bh) + ofs,
1132   - blocksize - ofs, KM_USER0);
  1129 + zero_user_segment(page, bh_offset(bh) + ofs,
  1130 + blocksize);
1133 1131 }
1134 1132 } else /* if (unlikely(!buffer_uptodate(bh))) */
1135 1133 err = -EIO;
... ... @@ -1269,8 +1267,8 @@
1269 1267 if (PageUptodate(page))
1270 1268 set_buffer_uptodate(bh);
1271 1269 else {
1272   - zero_user_page(page, bh_offset(bh),
1273   - blocksize, KM_USER0);
  1270 + zero_user(page, bh_offset(bh),
  1271 + blocksize);
1274 1272 set_buffer_uptodate(bh);
1275 1273 }
1276 1274 }
... ... @@ -1330,7 +1328,7 @@
1330 1328 len = PAGE_CACHE_SIZE;
1331 1329 if (len > bytes)
1332 1330 len = bytes;
1333   - zero_user_page(*pages, 0, len, KM_USER0);
  1331 + zero_user(*pages, 0, len);
1334 1332 }
1335 1333 goto out;
1336 1334 }
... ... @@ -1451,7 +1449,7 @@
1451 1449 len = PAGE_CACHE_SIZE;
1452 1450 if (len > bytes)
1453 1451 len = bytes;
1454   - zero_user_page(*pages, 0, len, KM_USER0);
  1452 + zero_user(*pages, 0, len);
1455 1453 }
1456 1454 goto out;
1457 1455 }
... ... @@ -5670,7 +5670,7 @@
5670 5670 mlog_errno(ret);
5671 5671  
5672 5672 if (zero)
5673   - zero_user_page(page, from, to - from, KM_USER0);
  5673 + zero_user_segment(page, from, to);
5674 5674  
5675 5675 /*
5676 5676 * Need to set the buffers we zero'd into uptodate
... ... @@ -307,7 +307,7 @@
307 307 * XXX sys_readahead() seems to get that wrong?
308 308 */
309 309 if (start >= i_size_read(inode)) {
310   - zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
  310 + zero_user(page, 0, PAGE_SIZE);
311 311 SetPageUptodate(page);
312 312 ret = 0;
313 313 goto out_alloc;
... ... @@ -869,7 +869,7 @@
869 869 if (block_start >= to)
870 870 break;
871 871  
872   - zero_user_page(page, block_start, bh->b_size, KM_USER0);
  872 + zero_user(page, block_start, bh->b_size);
873 873 set_buffer_uptodate(bh);
874 874 mark_buffer_dirty(bh);
875 875  
... ... @@ -1034,7 +1034,7 @@
1034 1034 start = max(from, block_start);
1035 1035 end = min(to, block_end);
1036 1036  
1037   - zero_user_page(page, start, end - start, KM_USER0);
  1037 + zero_user_segment(page, start, end);
1038 1038 set_buffer_uptodate(bh);
1039 1039 }
1040 1040  
... ... @@ -2143,7 +2143,7 @@
2143 2143 /* if we are not on a block boundary */
2144 2144 if (length) {
2145 2145 length = blocksize - length;
2146   - zero_user_page(page, offset, length, KM_USER0);
  2146 + zero_user(page, offset, length);
2147 2147 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
2148 2148 mark_buffer_dirty(bh);
2149 2149 }
... ... @@ -2367,7 +2367,7 @@
2367 2367 unlock_page(page);
2368 2368 return 0;
2369 2369 }
2370   - zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0);
  2370 + zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
2371 2371 }
2372 2372 bh = head;
2373 2373 block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
fs/xfs/linux-2.6/xfs_lrw.c
... ... @@ -155,7 +155,7 @@
155 155 if (status)
156 156 break;
157 157  
158   - zero_user_page(page, offset, bytes, KM_USER0);
  158 + zero_user(page, offset, bytes);
159 159  
160 160 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
161 161 page, fsdata);
include/linux/highmem.h
... ... @@ -124,28 +124,40 @@
124 124 kunmap_atomic(kaddr, KM_USER0);
125 125 }
126 126  
127   -/*
128   - * Same but also flushes aliased cache contents to RAM.
129   - *
130   - * This must be a macro because KM_USER0 and friends aren't defined if
131   - * !CONFIG_HIGHMEM
132   - */
133   -#define zero_user_page(page, offset, size, km_type) \
134   - do { \
135   - void *kaddr; \
136   - \
137   - BUG_ON((offset) + (size) > PAGE_SIZE); \
138   - \
139   - kaddr = kmap_atomic(page, km_type); \
140   - memset((char *)kaddr + (offset), 0, (size)); \
141   - flush_dcache_page(page); \
142   - kunmap_atomic(kaddr, (km_type)); \
143   - } while (0)
  127 +static inline void zero_user_segments(struct page *page,
  128 + unsigned start1, unsigned end1,
  129 + unsigned start2, unsigned end2)
  130 +{
  131 + void *kaddr = kmap_atomic(page, KM_USER0);
144 132  
  133 + BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
  134 +
  135 + if (end1 > start1)
  136 + memset(kaddr + start1, 0, end1 - start1);
  137 +
  138 + if (end2 > start2)
  139 + memset(kaddr + start2, 0, end2 - start2);
  140 +
  141 + kunmap_atomic(kaddr, KM_USER0);
  142 + flush_dcache_page(page);
  143 +}
  144 +
  145 +static inline void zero_user_segment(struct page *page,
  146 + unsigned start, unsigned end)
  147 +{
  148 + zero_user_segments(page, start, end, 0, 0);
  149 +}
  150 +
  151 +static inline void zero_user(struct page *page,
  152 + unsigned start, unsigned size)
  153 +{
  154 + zero_user_segments(page, start, start + size, 0, 0);
  155 +}
  156 +
145 157 static inline void __deprecated memclear_highpage_flush(struct page *page,
146 158 unsigned int offset, unsigned int size)
147 159 {
148   - zero_user_page(page, offset, size, KM_USER0);
  160 + zero_user(page, offset, size);
149 161 }
150 162  
151 163 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
... ... @@ -431,7 +431,7 @@
431 431 else
432 432 return PTR_ERR(page);
433 433 }
434   - zero_user_page(page, offset, length, KM_USER0);
  434 + zero_user(page, offset, length);
435 435 return 0;
436 436 }
437 437 EXPORT_SYMBOL_GPL(xip_truncate_page);
... ... @@ -48,7 +48,7 @@
48 48  
49 49 static inline void truncate_partial_page(struct page *page, unsigned partial)
50 50 {
51   - zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
  51 + zero_user_segment(page, partial, PAGE_CACHE_SIZE);
52 52 if (PagePrivate(page))
53 53 do_invalidatepage(page, partial);
54 54 }