Commit b05e6ae58a13b56e3e11882c1fc71948c9b29760
1 parent
01f49d0b9d
Exists in
master
and in
7 other branches
ext4: drop ec_type from the ext4_ext_cache structure
We can encode the ec_type information by using ec_len == 0 to denote EXT4_EXT_CACHE_NO, ec_start == 0 to denote EXT4_EXT_CACHE_GAP, and if neither is true, then the cache type must be EXT4_EXT_CACHE_EXTENT. This allows us to reduce the size of the ext4_ext_cache structure by another 8 bytes. (ec_type is 4 bytes, plus another 4 bytes of padding) Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Showing 3 changed files with 18 additions and 28 deletions Side-by-side Diff
fs/ext4/ext4.h
... | ... | @@ -738,12 +738,13 @@ |
738 | 738 | |
739 | 739 | /* |
740 | 740 | * storage for cached extent |
741 | + * If ec_len == 0, then the cache is invalid. | |
742 | + * If ec_start == 0, then the cache represents a gap (null mapping) | |
741 | 743 | */ |
742 | 744 | struct ext4_ext_cache { |
743 | 745 | ext4_fsblk_t ec_start; |
744 | 746 | ext4_lblk_t ec_block; |
745 | 747 | __u32 ec_len; /* must be 32bit to return holes */ |
746 | - __u32 ec_type; | |
747 | 748 | }; |
748 | 749 | |
749 | 750 | /* |
fs/ext4/ext4_extents.h
... | ... | @@ -119,10 +119,6 @@ |
119 | 119 | * structure for external API |
120 | 120 | */ |
121 | 121 | |
122 | -#define EXT4_EXT_CACHE_NO 0 | |
123 | -#define EXT4_EXT_CACHE_GAP 1 | |
124 | -#define EXT4_EXT_CACHE_EXTENT 2 | |
125 | - | |
126 | 122 | /* |
127 | 123 | * to be called by ext4_ext_walk_space() |
128 | 124 | * negative retcode - error |
... | ... | @@ -197,7 +193,7 @@ |
197 | 193 | static inline void |
198 | 194 | ext4_ext_invalidate_cache(struct inode *inode) |
199 | 195 | { |
200 | - EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO; | |
196 | + EXT4_I(inode)->i_cached_extent.ec_len = 0; | |
201 | 197 | } |
202 | 198 | |
203 | 199 | static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext) |
fs/ext4/extents.c
... | ... | @@ -1894,12 +1894,10 @@ |
1894 | 1894 | cbex.ec_block = start; |
1895 | 1895 | cbex.ec_len = end - start; |
1896 | 1896 | cbex.ec_start = 0; |
1897 | - cbex.ec_type = EXT4_EXT_CACHE_GAP; | |
1898 | 1897 | } else { |
1899 | 1898 | cbex.ec_block = le32_to_cpu(ex->ee_block); |
1900 | 1899 | cbex.ec_len = ext4_ext_get_actual_len(ex); |
1901 | 1900 | cbex.ec_start = ext4_ext_pblock(ex); |
1902 | - cbex.ec_type = EXT4_EXT_CACHE_EXTENT; | |
1903 | 1901 | } |
1904 | 1902 | |
1905 | 1903 | if (unlikely(cbex.ec_len == 0)) { |
1906 | 1904 | |
... | ... | @@ -1939,13 +1937,12 @@ |
1939 | 1937 | |
1940 | 1938 | static void |
1941 | 1939 | ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, |
1942 | - __u32 len, ext4_fsblk_t start, int type) | |
1940 | + __u32 len, ext4_fsblk_t start) | |
1943 | 1941 | { |
1944 | 1942 | struct ext4_ext_cache *cex; |
1945 | 1943 | BUG_ON(len == 0); |
1946 | 1944 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
1947 | 1945 | cex = &EXT4_I(inode)->i_cached_extent; |
1948 | - cex->ec_type = type; | |
1949 | 1946 | cex->ec_block = block; |
1950 | 1947 | cex->ec_len = len; |
1951 | 1948 | cex->ec_start = start; |
1952 | 1949 | |
1953 | 1950 | |
... | ... | @@ -1998,15 +1995,18 @@ |
1998 | 1995 | } |
1999 | 1996 | |
2000 | 1997 | ext_debug(" -> %u:%lu\n", lblock, len); |
2001 | - ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); | |
1998 | + ext4_ext_put_in_cache(inode, lblock, len, 0); | |
2002 | 1999 | } |
2003 | 2000 | |
2001 | +/* | |
2002 | + * Return 0 if cache is invalid; 1 if the cache is valid | |
2003 | + */ | |
2004 | 2004 | static int |
2005 | 2005 | ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, |
2006 | 2006 | struct ext4_extent *ex) |
2007 | 2007 | { |
2008 | 2008 | struct ext4_ext_cache *cex; |
2009 | - int ret = EXT4_EXT_CACHE_NO; | |
2009 | + int ret = 0; | |
2010 | 2010 | |
2011 | 2011 | /* |
2012 | 2012 | * We borrow i_block_reservation_lock to protect i_cached_extent |
2013 | 2013 | |
... | ... | @@ -2015,11 +2015,9 @@ |
2015 | 2015 | cex = &EXT4_I(inode)->i_cached_extent; |
2016 | 2016 | |
2017 | 2017 | /* has cache valid data? */ |
2018 | - if (cex->ec_type == EXT4_EXT_CACHE_NO) | |
2018 | + if (cex->ec_len == 0) | |
2019 | 2019 | goto errout; |
2020 | 2020 | |
2021 | - BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && | |
2022 | - cex->ec_type != EXT4_EXT_CACHE_EXTENT); | |
2023 | 2021 | if (in_range(block, cex->ec_block, cex->ec_len)) { |
2024 | 2022 | ex->ee_block = cpu_to_le32(cex->ec_block); |
2025 | 2023 | ext4_ext_store_pblock(ex, cex->ec_start); |
... | ... | @@ -2027,7 +2025,7 @@ |
2027 | 2025 | ext_debug("%u cached by %u:%u:%llu\n", |
2028 | 2026 | block, |
2029 | 2027 | cex->ec_block, cex->ec_len, cex->ec_start); |
2030 | - ret = cex->ec_type; | |
2028 | + ret = 1; | |
2031 | 2029 | } |
2032 | 2030 | errout: |
2033 | 2031 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
... | ... | @@ -3298,7 +3296,7 @@ |
3298 | 3296 | struct ext4_extent_header *eh; |
3299 | 3297 | struct ext4_extent newex, *ex; |
3300 | 3298 | ext4_fsblk_t newblock; |
3301 | - int err = 0, depth, ret, cache_type; | |
3299 | + int err = 0, depth, ret; | |
3302 | 3300 | unsigned int allocated = 0; |
3303 | 3301 | struct ext4_allocation_request ar; |
3304 | 3302 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
... | ... | @@ -3307,9 +3305,8 @@ |
3307 | 3305 | map->m_lblk, map->m_len, inode->i_ino); |
3308 | 3306 | |
3309 | 3307 | /* check in cache */ |
3310 | - cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex); | |
3311 | - if (cache_type) { | |
3312 | - if (cache_type == EXT4_EXT_CACHE_GAP) { | |
3308 | + if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { | |
3309 | + if (!newex.ee_start_lo && !newex.ee_start_hi) { | |
3313 | 3310 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
3314 | 3311 | /* |
3315 | 3312 | * block isn't allocated yet and |
... | ... | @@ -3318,7 +3315,7 @@ |
3318 | 3315 | goto out2; |
3319 | 3316 | } |
3320 | 3317 | /* we should allocate requested block */ |
3321 | - } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { | |
3318 | + } else { | |
3322 | 3319 | /* block is already allocated */ |
3323 | 3320 | newblock = map->m_lblk |
3324 | 3321 | - le32_to_cpu(newex.ee_block) |
... | ... | @@ -3327,8 +3324,6 @@ |
3327 | 3324 | allocated = ext4_ext_get_actual_len(&newex) - |
3328 | 3325 | (map->m_lblk - le32_to_cpu(newex.ee_block)); |
3329 | 3326 | goto out; |
3330 | - } else { | |
3331 | - BUG(); | |
3332 | 3327 | } |
3333 | 3328 | } |
3334 | 3329 | |
... | ... | @@ -3379,8 +3374,7 @@ |
3379 | 3374 | /* Do not put uninitialized extent in the cache */ |
3380 | 3375 | if (!ext4_ext_is_uninitialized(ex)) { |
3381 | 3376 | ext4_ext_put_in_cache(inode, ee_block, |
3382 | - ee_len, ee_start, | |
3383 | - EXT4_EXT_CACHE_EXTENT); | |
3377 | + ee_len, ee_start); | |
3384 | 3378 | goto out; |
3385 | 3379 | } |
3386 | 3380 | ret = ext4_ext_handle_uninitialized_extents(handle, |
... | ... | @@ -3512,8 +3506,7 @@ |
3512 | 3506 | * when it is _not_ an uninitialized extent. |
3513 | 3507 | */ |
3514 | 3508 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { |
3515 | - ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock, | |
3516 | - EXT4_EXT_CACHE_EXTENT); | |
3509 | + ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); | |
3517 | 3510 | ext4_update_inode_fsync_trans(handle, inode, 1); |
3518 | 3511 | } else |
3519 | 3512 | ext4_update_inode_fsync_trans(handle, inode, 0); |
... | ... | @@ -3789,7 +3782,7 @@ |
3789 | 3782 | |
3790 | 3783 | logical = (__u64)newex->ec_block << blksize_bits; |
3791 | 3784 | |
3792 | - if (newex->ec_type == EXT4_EXT_CACHE_GAP) { | |
3785 | + if (newex->ec_start == 0) { | |
3793 | 3786 | pgoff_t offset; |
3794 | 3787 | struct page *page; |
3795 | 3788 | struct buffer_head *bh = NULL; |