Commit c4af96449e20f9245cf3d904098db508cdebcda8

Authored by Akinobu Mita
Committed by Linus Torvalds
1 parent bcc54e2a6d

ntfs: use bitmap_weight

Use bitmap_weight() instead of doing hweight32() for each u32 element in
the page.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 13 additions and 12 deletions (side-by-side diff).

... ... @@ -31,6 +31,7 @@
31 31 #include <linux/vfs.h>
32 32 #include <linux/moduleparam.h>
33 33 #include <linux/smp_lock.h>
  34 +#include <linux/bitmap.h>
34 35  
35 36 #include "sysctl.h"
36 37 #include "logfile.h"
... ... @@ -2458,7 +2459,6 @@
2458 2459 static s64 get_nr_free_clusters(ntfs_volume *vol)
2459 2460 {
2460 2461 s64 nr_free = vol->nr_clusters;
2461   - u32 *kaddr;
2462 2462 struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
2463 2463 struct page *page;
2464 2464 pgoff_t index, max_index;
... ... @@ -2477,7 +2477,8 @@
2477 2477 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
2478 2478 max_index, PAGE_CACHE_SIZE / 4);
2479 2479 for (index = 0; index < max_index; index++) {
2480   - unsigned int i;
  2480 + unsigned long *kaddr;
  2481 +
2481 2482 /*
2482 2483 * Read the page from page cache, getting it from backing store
2483 2484 * if necessary, and increment the use count.
2484 2485  
2485 2486  
... ... @@ -2490,16 +2491,16 @@
2490 2491 nr_free -= PAGE_CACHE_SIZE * 8;
2491 2492 continue;
2492 2493 }
2493   - kaddr = (u32*)kmap_atomic(page, KM_USER0);
  2494 + kaddr = kmap_atomic(page, KM_USER0);
2494 2495 /*
2495   - * For each 4 bytes, subtract the number of set bits. If this
  2496 + * Subtract the number of set bits. If this
2496 2497 * is the last page and it is partial we don't really care as
2497 2498 * it just means we do a little extra work but it won't affect
2498 2499 * the result as all out of range bytes are set to zero by
2499 2500 * ntfs_readpage().
2500 2501 */
2501   - for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
2502   - nr_free -= (s64)hweight32(kaddr[i]);
  2502 + nr_free -= bitmap_weight(kaddr,
  2503 + PAGE_CACHE_SIZE * BITS_PER_BYTE);
2503 2504 kunmap_atomic(kaddr, KM_USER0);
2504 2505 page_cache_release(page);
2505 2506 }
... ... @@ -2538,7 +2539,6 @@
2538 2539 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2539 2540 s64 nr_free, const pgoff_t max_index)
2540 2541 {
2541   - u32 *kaddr;
2542 2542 struct address_space *mapping = vol->mftbmp_ino->i_mapping;
2543 2543 struct page *page;
2544 2544 pgoff_t index;
... ... @@ -2548,7 +2548,8 @@
2548 2548 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
2549 2549 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
2550 2550 for (index = 0; index < max_index; index++) {
2551   - unsigned int i;
  2551 + unsigned long *kaddr;
  2552 +
2552 2553 /*
2553 2554 * Read the page from page cache, getting it from backing store
2554 2555 * if necessary, and increment the use count.
2555 2556  
2556 2557  
... ... @@ -2561,16 +2562,16 @@
2561 2562 nr_free -= PAGE_CACHE_SIZE * 8;
2562 2563 continue;
2563 2564 }
2564   - kaddr = (u32*)kmap_atomic(page, KM_USER0);
  2565 + kaddr = kmap_atomic(page, KM_USER0);
2565 2566 /*
2566   - * For each 4 bytes, subtract the number of set bits. If this
  2567 + * Subtract the number of set bits. If this
2567 2568 * is the last page and it is partial we don't really care as
2568 2569 * it just means we do a little extra work but it won't affect
2569 2570 * the result as all out of range bytes are set to zero by
2570 2571 * ntfs_readpage().
2571 2572 */
2572   - for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
2573   - nr_free -= (s64)hweight32(kaddr[i]);
  2573 + nr_free -= bitmap_weight(kaddr,
  2574 + PAGE_CACHE_SIZE * BITS_PER_BYTE);
2574 2575 kunmap_atomic(kaddr, KM_USER0);
2575 2576 page_cache_release(page);
2576 2577 }