Commit 50e8a2890ed0eeb7a11ae0c39144fcdd1cad1cf8

Authored by Marcin Slusarz
Committed by Linus Torvalds
Parent: 8b5f688368

ext3: replace all adds to little-endian variables with le*_add_cpu

replace all:
	little_endian_variable = cpu_to_leX(leX_to_cpu(little_endian_variable) +
				expression_in_cpu_byteorder);
with:
	leX_add_cpu(&little_endian_variable, expression_in_cpu_byteorder);
sparse didn't generate any new warnings with this patch
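
For reference, the leX_add_cpu() helpers are small inline functions in the byteorder
headers that perform exactly the open-coded conversion shown above; a minimal sketch
of the 16-bit variant (the 32- and 64-bit versions follow the same pattern):

	static inline void le16_add_cpu(__le16 *var, u16 val)
	{
		/* convert to CPU byte order, add, convert back to little endian */
		*var = cpu_to_le16(le16_to_cpu(*var) + val);
	}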

Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: David Chinner <dgc@sgi.com>
Cc: Timothy Shimmin <tes@sgi.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 5 changed files with 13 additions and 26 deletions

@@ -630,9 +630,7 @@
 	jbd_unlock_bh_state(bitmap_bh);
 
 	spin_lock(sb_bgl_lock(sbi, block_group));
-	desc->bg_free_blocks_count =
-		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
-			group_freed);
+	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
 	spin_unlock(sb_bgl_lock(sbi, block_group));
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
 
@@ -1696,8 +1694,7 @@
 			ret_block, goal_hits, goal_attempts);
 
 	spin_lock(sb_bgl_lock(sbi, group_no));
-	gdp->bg_free_blocks_count =
-		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
+	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
 	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
@@ -164,11 +164,9 @@
 
 	if (gdp) {
 		spin_lock(sb_bgl_lock(sbi, block_group));
-		gdp->bg_free_inodes_count = cpu_to_le16(
-			le16_to_cpu(gdp->bg_free_inodes_count) + 1);
+		le16_add_cpu(&gdp->bg_free_inodes_count, 1);
 		if (is_directory)
-			gdp->bg_used_dirs_count = cpu_to_le16(
-				le16_to_cpu(gdp->bg_used_dirs_count) - 1);
+			le16_add_cpu(&gdp->bg_used_dirs_count, -1);
 		spin_unlock(sb_bgl_lock(sbi, block_group));
 		percpu_counter_inc(&sbi->s_freeinodes_counter);
 		if (is_directory)
@@ -527,11 +525,9 @@
 	err = ext3_journal_get_write_access(handle, bh2);
 	if (err) goto fail;
 	spin_lock(sb_bgl_lock(sbi, group));
-	gdp->bg_free_inodes_count =
-		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
 	if (S_ISDIR(mode)) {
-		gdp->bg_used_dirs_count =
-			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
 	}
 	spin_unlock(sb_bgl_lock(sbi, group));
 	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
@@ -518,8 +518,7 @@
 	EXT3_SB(sb)->s_gdb_count++;
 	kfree(o_group_desc);
 
-	es->s_reserved_gdt_blocks =
-		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
+	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
 	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 
 	return 0;
@@ -890,10 +889,8 @@
 	 * blocks/inodes before the group is live won't actually let us
 	 * allocate the new space yet.
 	 */
-	es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) +
-		input->blocks_count);
-	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
-		EXT3_INODES_PER_GROUP(sb));
+	le32_add_cpu(&es->s_blocks_count, input->blocks_count);
+	le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));
 
 	/*
 	 * We need to protect s_groups_count against other CPUs seeing
@@ -926,8 +923,7 @@
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
-	es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) +
-		input->reserved_blocks);
+	le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);
 
 	/* Update the free space counts */
 	percpu_counter_add(&sbi->s_freeblocks_counter,
@@ -1222,7 +1222,7 @@
 #endif
 	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
 		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
-	es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+	le16_add_cpu(&es->s_mnt_count, 1);
 	es->s_mtime = cpu_to_le32(get_seconds());
 	ext3_update_dynamic_rev(sb);
 	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
@@ -492,8 +492,7 @@
 		get_bh(bh);
 		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
 	} else {
-		BHDR(bh)->h_refcount = cpu_to_le32(
-			le32_to_cpu(BHDR(bh)->h_refcount) - 1);
+		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
 		error = ext3_journal_dirty_metadata(handle, bh);
 		if (IS_SYNC(inode))
 			handle->h_sync = 1;
@@ -780,8 +779,7 @@
 		if (error)
 			goto cleanup_dquot;
 		lock_buffer(new_bh);
-		BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
-			le32_to_cpu(BHDR(new_bh)->h_refcount));
+		le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
 		ea_bdebug(new_bh, "reusing; refcount now=%d",
 			le32_to_cpu(BHDR(new_bh)->h_refcount));
 		unlock_buffer(new_bh);