Commit 79ffab34391933ee3b95dac7f25c0478fa2f8f1e

Authored by Aneesh Kumar K.V
Committed by Theodore Ts'o
Parent commit: 9fa7eb283c

ext4: Properly initialize the buffer_head state

These struct buffer_heads are allocated on the stack (and hence are
initialized with stack garbage).  They are only used to call a
get_blocks() function, so that's mostly OK, but b_state must be
initialized to be 0 so we don't have any unexpected BH_* flags set by
accident, such as BH_Unwritten or BH_Delay.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>

Showing 3 changed files with 19 additions and 3 deletions (side-by-side diff)

... ... @@ -3150,6 +3150,7 @@
3150 3150 ret = PTR_ERR(handle);
3151 3151 break;
3152 3152 }
  3153 + map_bh.b_state = 0;
3153 3154 ret = ext4_get_blocks_wrap(handle, inode, block,
3154 3155 max_blocks, &map_bh,
3155 3156 EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
... ... @@ -2055,7 +2055,20 @@
2055 2055 if ((mpd->b_state & (1 << BH_Mapped)) &&
2056 2056 !(mpd->b_state & (1 << BH_Delay)))
2057 2057 return 0;
2058   - new.b_state = mpd->b_state;
  2058 + /*
  2059 + * We need to make sure the BH_Delay flag is passed down to
  2060 + * ext4_da_get_block_write(), since it calls
  2061 + * ext4_get_blocks_wrap() with the EXT4_DELALLOC_RSVED flag.
  2062 + * This flag causes ext4_get_blocks_wrap() to call
  2063 + * ext4_da_update_reserve_space() if the passed buffer head
  2064 + * has the BH_Delay flag set. In the future, once we clean up
  2065 + * the interfaces to ext4_get_blocks_wrap(), we should pass in
  2066 + * a separate flag which requests that the delayed allocation
  2067 + * statistics should be updated, instead of depending on the
  2068 + * state information getting passed down via the map_bh's
  2069 + * state bitmasks plus the magic EXT4_DELALLOC_RSVED flag.
  2070 + */
  2071 + new.b_state = mpd->b_state & (1 << BH_Delay);
2059 2072 new.b_blocknr = 0;
2060 2073 new.b_size = mpd->b_size;
2061 2074 next = mpd->b_blocknr;
... ... @@ -379,7 +379,8 @@
379 379 struct buffer_head map_bh;
380 380 unsigned long first_logical_block = 0;
381 381  
382   - clear_buffer_mapped(&map_bh);
  382 + map_bh.b_state = 0;
  383 + map_bh.b_size = 0;
383 384 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
384 385 struct page *page = list_entry(pages->prev, struct page, lru);
385 386  
... ... @@ -412,7 +413,8 @@
412 413 struct buffer_head map_bh;
413 414 unsigned long first_logical_block = 0;
414 415  
415   - clear_buffer_mapped(&map_bh);
  416 + map_bh.b_state = 0;
  417 + map_bh.b_size = 0;
416 418 bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
417 419 &map_bh, &first_logical_block, get_block);
418 420 if (bio)