Commit e984fd486fdbd65d1b4a637f0ef80086eee8fbe6

Authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  Define/reserve new ext4 superblock fields
  When ext4_ext_insert_extent() fails to insert new blocks
  ext4: Extent overlap bugfix
  Remove unnecessary exported symbols.
  EXT4: Fix whitespace

Showing 8 changed files

fs/ext4/balloc.c
... ... @@ -30,15 +30,15 @@
30 30 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
31 31 unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
32 32 {
33   - struct ext4_super_block *es = EXT4_SB(sb)->s_es;
  33 + struct ext4_super_block *es = EXT4_SB(sb)->s_es;
34 34 ext4_grpblk_t offset;
35 35  
36   - blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
  36 + blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
37 37 offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
38 38 if (offsetp)
39 39 *offsetp = offset;
40 40 if (blockgrpp)
41   - *blockgrpp = blocknr;
  41 + *blockgrpp = blocknr;
42 42  
43 43 }
44 44  
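Note on the hunk above: do_div(n, d) divides the 64-bit value n in place and returns the remainder, so after the call blocknr holds the group number and offset the position within the group. A minimal userspace sketch of the same arithmetic (first_data_block and blocks_per_group stand in for the superblock accessors; this is a model, not the kernel source):

#include <stdint.h>
#include <stdio.h>

static void get_group_no_and_offset(uint64_t blocknr,
                                    uint32_t first_data_block,
                                    uint32_t blocks_per_group,
                                    unsigned long *blockgrpp,
                                    uint32_t *offsetp)
{
        blocknr -= first_data_block;    /* make relative to first data block */
        *offsetp = (uint32_t)(blocknr % blocks_per_group);
        *blockgrpp = (unsigned long)(blocknr / blocks_per_group);
}

int main(void)
{
        unsigned long group;
        uint32_t offset;

        /* e.g. 32768 blocks per group (4K blocks), first data block 0 */
        get_group_no_and_offset(100000, 0, 32768, &group, &offset);
        printf("group %lu, offset %u\n", group, offset); /* group 3, offset 1696 */
        return 0;
}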
fs/ext4/extents.c
... ... @@ -374,7 +374,7 @@
374 374 le32_to_cpu(ix[-1].ei_block));
375 375 }
376 376 BUG_ON(k && le32_to_cpu(ix->ei_block)
377   - <= le32_to_cpu(ix[-1].ei_block));
  377 + <= le32_to_cpu(ix[-1].ei_block));
378 378 if (block < le32_to_cpu(ix->ei_block))
379 379 break;
380 380 chix = ix;
... ... @@ -423,8 +423,8 @@
423 423  
424 424 path->p_ext = l - 1;
425 425 ext_debug(" -> %d:%llu:%d ",
426   - le32_to_cpu(path->p_ext->ee_block),
427   - ext_pblock(path->p_ext),
  426 + le32_to_cpu(path->p_ext->ee_block),
  427 + ext_pblock(path->p_ext),
428 428 le16_to_cpu(path->p_ext->ee_len));
429 429  
430 430 #ifdef CHECK_BINSEARCH
... ... @@ -435,7 +435,7 @@
435 435 chex = ex = EXT_FIRST_EXTENT(eh);
436 436 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
437 437 BUG_ON(k && le32_to_cpu(ex->ee_block)
438   - <= le32_to_cpu(ex[-1].ee_block));
  438 + <= le32_to_cpu(ex[-1].ee_block));
439 439 if (block < le32_to_cpu(ex->ee_block))
440 440 break;
441 441 chex = ex;
... ... @@ -577,7 +577,7 @@
577 577 curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
578 578  
579 579 BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
580   - > le16_to_cpu(curp->p_hdr->eh_max));
  580 + > le16_to_cpu(curp->p_hdr->eh_max));
581 581 BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
582 582  
583 583 err = ext4_ext_dirty(handle, inode, curp);
584 584  
... ... @@ -621,12 +621,12 @@
621 621 border = path[depth].p_ext[1].ee_block;
622 622 ext_debug("leaf will be split."
623 623 " next leaf starts at %d\n",
624   - le32_to_cpu(border));
  624 + le32_to_cpu(border));
625 625 } else {
626 626 border = newext->ee_block;
627 627 ext_debug("leaf will be added."
628 628 " next leaf starts at %d\n",
629   - le32_to_cpu(border));
  629 + le32_to_cpu(border));
630 630 }
631 631  
632 632 /*
... ... @@ -684,9 +684,9 @@
684 684 while (path[depth].p_ext <=
685 685 EXT_MAX_EXTENT(path[depth].p_hdr)) {
686 686 ext_debug("move %d:%llu:%d in new leaf %llu\n",
687   - le32_to_cpu(path[depth].p_ext->ee_block),
688   - ext_pblock(path[depth].p_ext),
689   - le16_to_cpu(path[depth].p_ext->ee_len),
  687 + le32_to_cpu(path[depth].p_ext->ee_block),
  688 + ext_pblock(path[depth].p_ext),
  689 + le16_to_cpu(path[depth].p_ext->ee_len),
690 690 newblock);
691 691 /*memmove(ex++, path[depth].p_ext++,
692 692 sizeof(struct ext4_extent));
... ... @@ -765,9 +765,9 @@
765 765 EXT_LAST_INDEX(path[i].p_hdr));
766 766 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
767 767 ext_debug("%d: move %d:%d in new index %llu\n", i,
768   - le32_to_cpu(path[i].p_idx->ei_block),
769   - idx_pblock(path[i].p_idx),
770   - newblock);
  768 + le32_to_cpu(path[i].p_idx->ei_block),
  769 + idx_pblock(path[i].p_idx),
  770 + newblock);
771 771 /*memmove(++fidx, path[i].p_idx++,
772 772 sizeof(struct ext4_extent_idx));
773 773 neh->eh_entries++;
... ... @@ -1128,6 +1128,55 @@
1128 1128 }
1129 1129  
1130 1130 /*
  1131 + * check if a portion of the "newext" extent overlaps with an
  1132 + * existing extent.
  1133 + *
  1134 + * If there is an overlap discovered, it updates the length of the newext
  1135 + * such that there will be no overlap, and then returns 1.
  1136 + * If there is no overlap found, it returns 0.
  1137 + */
  1138 +unsigned int ext4_ext_check_overlap(struct inode *inode,
  1139 + struct ext4_extent *newext,
  1140 + struct ext4_ext_path *path)
  1141 +{
  1142 + unsigned long b1, b2;
  1143 + unsigned int depth, len1;
  1144 + unsigned int ret = 0;
  1145 +
  1146 + b1 = le32_to_cpu(newext->ee_block);
  1147 + len1 = le16_to_cpu(newext->ee_len);
  1148 + depth = ext_depth(inode);
  1149 + if (!path[depth].p_ext)
  1150 + goto out;
  1151 + b2 = le32_to_cpu(path[depth].p_ext->ee_block);
  1152 +
  1153 + /*
  1154 + * get the next allocated block if the extent in the path
  1155 + * is before the requested block(s)
  1156 + */
  1157 + if (b2 < b1) {
  1158 + b2 = ext4_ext_next_allocated_block(path);
  1159 + if (b2 == EXT_MAX_BLOCK)
  1160 + goto out;
  1161 + }
  1162 +
  1163 + /* check for wrap through zero */
  1164 + if (b1 + len1 < b1) {
  1165 + len1 = EXT_MAX_BLOCK - b1;
  1166 + newext->ee_len = cpu_to_le16(len1);
  1167 + ret = 1;
  1168 + }
  1169 +
  1170 + /* check for overlap */
  1171 + if (b1 + len1 > b2) {
  1172 + newext->ee_len = cpu_to_le16(b2 - b1);
  1173 + ret = 1;
  1174 + }
  1175 +out:
  1176 + return ret;
  1177 +}
  1178 +
  1179 +/*
1131 1180 * ext4_ext_insert_extent:
1132 1181 * tries to merge requested extent into the existing extent or
1133 1182 * inserts requested extent as new one into the tree,
1134 1183  
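The clamping that ext4_ext_check_overlap() performs above can be modelled in isolation. A hedged userspace sketch (check_overlap is a stand-in, not the kernel function, and the caller is assumed to have already advanced b2 to the next allocated block when it started below b1, as the kernel code does):

#include <assert.h>
#include <stdint.h>

#define EXT_MAX_BLOCK   0xffffffffU

static int check_overlap(uint32_t b1, uint32_t *len1, uint32_t b2)
{
        int ret = 0;

        /* proposed extent wraps through zero: truncate at the top */
        if (b1 + *len1 < b1) {
                *len1 = EXT_MAX_BLOCK - b1;
                ret = 1;
        }
        /* proposed extent runs into the next one: stop just before b2 */
        if (b1 + *len1 > b2) {
                *len1 = b2 - b1;
                ret = 1;
        }
        return ret;
}

int main(void)
{
        uint32_t len = 10;

        /* [100,110) collides with an extent starting at block 105 */
        assert(check_overlap(100, &len, 105) == 1 && len == 5);
        return 0;
}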
... ... @@ -1212,12 +1261,12 @@
1212 1261 if (!nearex) {
1213 1262 /* there is no extent in this leaf, create first one */
1214 1263 ext_debug("first extent in the leaf: %d:%llu:%d\n",
1215   - le32_to_cpu(newext->ee_block),
1216   - ext_pblock(newext),
1217   - le16_to_cpu(newext->ee_len));
  1264 + le32_to_cpu(newext->ee_block),
  1265 + ext_pblock(newext),
  1266 + le16_to_cpu(newext->ee_len));
1218 1267 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1219 1268 } else if (le32_to_cpu(newext->ee_block)
1220   - > le32_to_cpu(nearex->ee_block)) {
  1269 + > le32_to_cpu(nearex->ee_block)) {
1221 1270 /* BUG_ON(newext->ee_block == nearex->ee_block); */
1222 1271 if (nearex != EXT_LAST_EXTENT(eh)) {
1223 1272 len = EXT_MAX_EXTENT(eh) - nearex;
... ... @@ -1225,9 +1274,9 @@
1225 1274 len = len < 0 ? 0 : len;
1226 1275 ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1227 1276 "move %d from 0x%p to 0x%p\n",
1228   - le32_to_cpu(newext->ee_block),
1229   - ext_pblock(newext),
1230   - le16_to_cpu(newext->ee_len),
  1277 + le32_to_cpu(newext->ee_block),
  1278 + ext_pblock(newext),
  1279 + le16_to_cpu(newext->ee_len),
1231 1280 nearex, len, nearex + 1, nearex + 2);
1232 1281 memmove(nearex + 2, nearex + 1, len);
1233 1282 }
... ... @@ -1358,9 +1407,9 @@
1358 1407 cbex.ec_start = 0;
1359 1408 cbex.ec_type = EXT4_EXT_CACHE_GAP;
1360 1409 } else {
1361   - cbex.ec_block = le32_to_cpu(ex->ee_block);
1362   - cbex.ec_len = le16_to_cpu(ex->ee_len);
1363   - cbex.ec_start = ext_pblock(ex);
  1410 + cbex.ec_block = le32_to_cpu(ex->ee_block);
  1411 + cbex.ec_len = le16_to_cpu(ex->ee_len);
  1412 + cbex.ec_start = ext_pblock(ex);
1364 1413 cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1365 1414 }
1366 1415  
1367 1416  
1368 1417  
... ... @@ -1431,16 +1480,16 @@
1431 1480 len = le32_to_cpu(ex->ee_block) - block;
1432 1481 ext_debug("cache gap(before): %lu [%lu:%lu]",
1433 1482 (unsigned long) block,
1434   - (unsigned long) le32_to_cpu(ex->ee_block),
1435   - (unsigned long) le16_to_cpu(ex->ee_len));
  1483 + (unsigned long) le32_to_cpu(ex->ee_block),
  1484 + (unsigned long) le16_to_cpu(ex->ee_len));
1436 1485 } else if (block >= le32_to_cpu(ex->ee_block)
1437   - + le16_to_cpu(ex->ee_len)) {
1438   - lblock = le32_to_cpu(ex->ee_block)
1439   - + le16_to_cpu(ex->ee_len);
  1486 + + le16_to_cpu(ex->ee_len)) {
  1487 + lblock = le32_to_cpu(ex->ee_block)
  1488 + + le16_to_cpu(ex->ee_len);
1440 1489 len = ext4_ext_next_allocated_block(path);
1441 1490 ext_debug("cache gap(after): [%lu:%lu] %lu",
1442   - (unsigned long) le32_to_cpu(ex->ee_block),
1443   - (unsigned long) le16_to_cpu(ex->ee_len),
  1491 + (unsigned long) le32_to_cpu(ex->ee_block),
  1492 + (unsigned long) le16_to_cpu(ex->ee_len),
1444 1493 (unsigned long) block);
1445 1494 BUG_ON(len == lblock);
1446 1495 len = len - lblock;
1447 1496  
... ... @@ -1468,9 +1517,9 @@
1468 1517 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1469 1518 cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1470 1519 if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1471   - ex->ee_block = cpu_to_le32(cex->ec_block);
  1520 + ex->ee_block = cpu_to_le32(cex->ec_block);
1472 1521 ext4_ext_store_pblock(ex, cex->ec_start);
1473   - ex->ee_len = cpu_to_le16(cex->ec_len);
  1522 + ex->ee_len = cpu_to_le16(cex->ec_len);
1474 1523 ext_debug("%lu cached by %lu:%lu:%llu\n",
1475 1524 (unsigned long) block,
1476 1525 (unsigned long) cex->ec_block,
... ... @@ -1956,9 +2005,9 @@
1956 2005 /* we should allocate requested block */
1957 2006 } else if (goal == EXT4_EXT_CACHE_EXTENT) {
1958 2007 /* block is already allocated */
1959   - newblock = iblock
1960   - - le32_to_cpu(newex.ee_block)
1961   - + ext_pblock(&newex);
  2008 + newblock = iblock
  2009 + - le32_to_cpu(newex.ee_block)
  2010 + + ext_pblock(&newex);
1962 2011 /* number of remaining blocks in the extent */
1963 2012 allocated = le16_to_cpu(newex.ee_len) -
1964 2013 (iblock - le32_to_cpu(newex.ee_block));
... ... @@ -1987,7 +2036,7 @@
1987 2036  
1988 2037 ex = path[depth].p_ext;
1989 2038 if (ex) {
1990   - unsigned long ee_block = le32_to_cpu(ex->ee_block);
  2039 + unsigned long ee_block = le32_to_cpu(ex->ee_block);
1991 2040 ext4_fsblk_t ee_start = ext_pblock(ex);
1992 2041 unsigned short ee_len = le16_to_cpu(ex->ee_len);
1993 2042  
... ... @@ -2000,7 +2049,7 @@
2000 2049 if (ee_len > EXT_MAX_LEN)
2001 2050 goto out2;
2002 2051 /* if found extent covers block, simply return it */
2003   - if (iblock >= ee_block && iblock < ee_block + ee_len) {
  2052 + if (iblock >= ee_block && iblock < ee_block + ee_len) {
2004 2053 newblock = iblock - ee_block + ee_start;
2005 2054 /* number of remaining blocks in the extent */
2006 2055 allocated = ee_len - (iblock - ee_block);
... ... @@ -2031,7 +2080,15 @@
2031 2080  
2032 2081 /* allocate new block */
2033 2082 goal = ext4_ext_find_goal(inode, path, iblock);
2034   - allocated = max_blocks;
  2083 +
  2084 + /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
  2085 + newex.ee_block = cpu_to_le32(iblock);
  2086 + newex.ee_len = cpu_to_le16(max_blocks);
  2087 + err = ext4_ext_check_overlap(inode, &newex, path);
  2088 + if (err)
  2089 + allocated = le16_to_cpu(newex.ee_len);
  2090 + else
  2091 + allocated = max_blocks;
2035 2092 newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
2036 2093 if (!newblock)
2037 2094 goto out2;
2038 2095  
2039 2096  
... ... @@ -2039,12 +2096,15 @@
2039 2096 goal, newblock, allocated);
2040 2097  
2041 2098 /* try to insert new extent into found leaf and return */
2042   - newex.ee_block = cpu_to_le32(iblock);
2043 2099 ext4_ext_store_pblock(&newex, newblock);
2044 2100 newex.ee_len = cpu_to_le16(allocated);
2045 2101 err = ext4_ext_insert_extent(handle, inode, path, &newex);
2046   - if (err)
  2102 + if (err) {
  2103 + /* free data blocks we just allocated */
  2104 + ext4_free_blocks(handle, inode, ext_pblock(&newex),
  2105 + le16_to_cpu(newex.ee_len));
2047 2106 goto out2;
  2107 + }
2048 2108  
2049 2109 if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
2050 2110 EXT4_I(inode)->i_disksize = inode->i_size;
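This hunk is the substance of the "When ext4_ext_insert_extent() fails to insert new blocks" fix: blocks handed out by ext4_new_blocks() must be returned via ext4_free_blocks() if the extent-tree insert fails, otherwise they are lost until fsck. A self-contained toy showing the allocate/insert/undo shape (all names here are illustrative, not the kernel API):

#include <stdio.h>

/* Toy stand-ins; names and behaviour are illustrative only. */
static int alloc_blocks(unsigned long *start, unsigned long count)
{
        *start = 1000;                          /* pretend it succeeded */
        printf("allocated %lu block(s) at %lu\n", count, *start);
        return 0;
}

static int insert_extent(unsigned long start, unsigned long count)
{
        (void)start;
        (void)count;
        return -1;                              /* simulate tree-insert failure */
}

static void free_blocks(unsigned long start, unsigned long count)
{
        printf("freed %lu block(s) at %lu\n", count, start);
}

static int map_blocks(unsigned long count)
{
        unsigned long start;
        int err;

        err = alloc_blocks(&start, count);
        if (err)
                return err;

        err = insert_extent(start, count);
        if (err) {
                /* the fix: give the data blocks back on insert failure */
                free_blocks(start, count);
                return err;
        }
        return 0;
}

int main(void)
{
        return map_blocks(8) ? 1 : 0;
}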
... ... @@ -2157,11 +2217,4 @@
2157 2217  
2158 2218 return needed;
2159 2219 }
2160   -
2161   -EXPORT_SYMBOL(ext4_mark_inode_dirty);
2162   -EXPORT_SYMBOL(ext4_ext_invalidate_cache);
2163   -EXPORT_SYMBOL(ext4_ext_insert_extent);
2164   -EXPORT_SYMBOL(ext4_ext_walk_space);
2165   -EXPORT_SYMBOL(ext4_ext_find_goal);
2166   -EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
fs/ext4/inode.c
... ... @@ -255,8 +255,8 @@
255 255 * @inode: inode in question (we are only interested in its superblock)
256 256 * @i_block: block number to be parsed
257 257 * @offsets: array to store the offsets in
258   - * @boundary: set this non-zero if the referred-to block is likely to be
259   - * followed (on disk) by an indirect block.
  258 + * @boundary: set this non-zero if the referred-to block is likely to be
  259 + * followed (on disk) by an indirect block.
260 260 *
261 261 * To store the locations of file's data ext4 uses a data structure common
262 262 * for UNIX filesystems - tree of pointers anchored in the inode, with
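The comment above belongs to ext4_block_to_path(), which turns a file-relative block number into a chain of offsets through the inode's pointer tree. A simplified userspace model of that computation (12 direct slots and n pointers per indirect block are assumed parameters; bounds checks are omitted):

#include <stdio.h>

#define NDIR    12                      /* direct pointers in the inode */

static int block_to_path(unsigned long i_block, unsigned long n,
                         unsigned long offsets[4])
{
        int depth = 0;

        if (i_block < NDIR) {
                offsets[depth++] = i_block;             /* direct */
        } else if ((i_block -= NDIR) < n) {
                offsets[depth++] = NDIR;                /* indirect block */
                offsets[depth++] = i_block;
        } else if ((i_block -= n) < n * n) {
                offsets[depth++] = NDIR + 1;            /* double indirect */
                offsets[depth++] = i_block / n;
                offsets[depth++] = i_block % n;
        } else {
                i_block -= n * n;
                offsets[depth++] = NDIR + 2;            /* triple indirect */
                offsets[depth++] = i_block / (n * n);
                offsets[depth++] = (i_block / n) % n;
                offsets[depth++] = i_block % n;
        }
        return depth;
}

int main(void)
{
        unsigned long off[4];
        int depth = block_to_path(17, 1024, off);       /* 5th indirect slot */

        printf("depth %d: %lu %lu\n", depth, off[0], off[1]); /* depth 2: 12 5 */
        return 0;
}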
fs/ext4/namei.c
... ... @@ -46,7 +46,7 @@
46 46 */
47 47 #define NAMEI_RA_CHUNKS 2
48 48 #define NAMEI_RA_BLOCKS 4
49   -#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  49 +#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
50 50 #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
51 51  
52 52 static struct buffer_head *ext4_append(handle_t *handle,
... ... @@ -241,7 +241,7 @@
241 241 static void dx_show_index (char * label, struct dx_entry *entries)
242 242 {
243 243 int i, n = dx_get_count (entries);
244   - printk("%s index ", label);
  244 + printk("%s index ", label);
245 245 for (i = 0; i < n; i++) {
246 246 printk("%x->%u ", i? dx_get_hash(entries + i) :
247 247 0, dx_get_block(entries + i));
fs/ext4/super.c
... ... @@ -1985,7 +1985,7 @@
1985 1985  
1986 1986 if (bd_claim(bdev, sb)) {
1987 1987 printk(KERN_ERR
1988   - "EXT4: failed to claim external journal device.\n");
  1988 + "EXT4: failed to claim external journal device.\n");
1989 1989 blkdev_put(bdev);
1990 1990 return NULL;
1991 1991 }
include/linux/ext4_fs.h
... ... @@ -32,9 +32,9 @@
32 32 /*
33 33 * Define EXT4_RESERVATION to reserve data blocks for expanding files
34 34 */
35   -#define EXT4_DEFAULT_RESERVE_BLOCKS 8
  35 +#define EXT4_DEFAULT_RESERVE_BLOCKS 8
36 36 /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
37   -#define EXT4_MAX_RESERVE_BLOCKS 1027
  37 +#define EXT4_MAX_RESERVE_BLOCKS 1027
38 38 #define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
39 39 /*
40 40 * Always enable hashed directories
... ... @@ -204,12 +204,12 @@
204 204  
205 205 /* Used to pass group descriptor data when online resize is done */
206 206 struct ext4_new_group_input {
207   - __u32 group; /* Group number for this data */
208   - __u64 block_bitmap; /* Absolute block number of block bitmap */
209   - __u64 inode_bitmap; /* Absolute block number of inode bitmap */
210   - __u64 inode_table; /* Absolute block number of inode table start */
211   - __u32 blocks_count; /* Total number of blocks in this group */
212   - __u16 reserved_blocks; /* Number of reserved blocks in this group */
  207 + __u32 group; /* Group number for this data */
  208 + __u64 block_bitmap; /* Absolute block number of block bitmap */
  209 + __u64 inode_bitmap; /* Absolute block number of inode bitmap */
  210 + __u64 inode_table; /* Absolute block number of inode table start */
  211 + __u32 blocks_count; /* Total number of blocks in this group */
  212 + __u16 reserved_blocks; /* Number of reserved blocks in this group */
213 213 __u16 unused;
214 214 };
215 215  
... ... @@ -310,7 +310,7 @@
310 310 __u8 l_i_frag; /* Fragment number */
311 311 __u8 l_i_fsize; /* Fragment size */
312 312 __le16 l_i_file_acl_high;
313   - __le16 l_i_uid_high; /* these 2 fields */
  313 + __le16 l_i_uid_high; /* these 2 fields */
314 314 __le16 l_i_gid_high; /* were reserved2[0] */
315 315 __u32 l_i_reserved2;
316 316 } linux2;
... ... @@ -513,7 +513,14 @@
513 513 /*150*/ __le32 s_blocks_count_hi; /* Blocks count */
514 514 __le32 s_r_blocks_count_hi; /* Reserved blocks count */
515 515 __le32 s_free_blocks_count_hi; /* Free blocks count */
516   - __u32 s_reserved[169]; /* Padding to the end of the block */
  516 + __u16 s_min_extra_isize; /* All inodes have at least # bytes */
  517 + __u16 s_want_extra_isize; /* New inodes should reserve # bytes */
  518 + __u32 s_flags; /* Miscellaneous flags */
  519 + __u16 s_raid_stride; /* RAID stride */
  520 + __u16 s_mmp_interval; /* # seconds to wait in MMP checking */
  521 + __u64 s_mmp_block; /* Block for multi-mount protection */
  522 + __u32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
  523 + __u32 s_reserved[163]; /* Padding to the end of the block */
517 524 };
518 525  
519 526 #ifdef __KERNEL__
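A quick consistency check on the layout change above: the seven new fields occupy 24 bytes, exactly the six __u32 slots trimmed from s_reserved (169 to 163), so the superblock's on-disk size is unchanged. As a compile-and-run sanity sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* four __u16, two __u32, one __u64 */
        unsigned long added = 4 * sizeof(uint16_t) + 2 * sizeof(uint32_t)
                            + sizeof(uint64_t);

        /* matches the six __u32 slots trimmed from s_reserved */
        assert(added == (169 - 163) * sizeof(uint32_t));
        return 0;
}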
... ... @@ -780,9 +787,9 @@
780 787 * Ok, these declarations are also in <linux/kernel.h> but none of the
781 788 * ext4 source programs needs to include it so they are duplicated here.
782 789 */
783   -# define NORET_TYPE /**/
784   -# define ATTRIB_NORET __attribute__((noreturn))
785   -# define NORET_AND noreturn,
  790 +# define NORET_TYPE /**/
  791 +# define ATTRIB_NORET __attribute__((noreturn))
  792 +# define NORET_AND noreturn,
786 793  
787 794 /* balloc.c */
788 795 extern unsigned int ext4_block_group(struct super_block *sb,
include/linux/ext4_fs_extents.h
... ... @@ -151,8 +151,8 @@
151 151 ((struct ext4_extent_idx *) (((char *) (__hdr__)) + \
152 152 sizeof(struct ext4_extent_header)))
153 153 #define EXT_HAS_FREE_INDEX(__path__) \
154   - (le16_to_cpu((__path__)->p_hdr->eh_entries) \
155   - < le16_to_cpu((__path__)->p_hdr->eh_max))
  154 + (le16_to_cpu((__path__)->p_hdr->eh_entries) \
  155 + < le16_to_cpu((__path__)->p_hdr->eh_max))
156 156 #define EXT_LAST_EXTENT(__hdr__) \
157 157 (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
158 158 #define EXT_LAST_INDEX(__hdr__) \
... ... @@ -190,6 +190,7 @@
190 190  
191 191 extern int ext4_extent_tree_init(handle_t *, struct inode *);
192 192 extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
  193 +extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
193 194 extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
194 195 extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
195 196 extern struct ext4_ext_path * ext4_ext_find_extent(struct inode *, int, struct ext4_ext_path *);
include/linux/ext4_fs_i.h
... ... @@ -41,14 +41,14 @@
41 41  
42 42 struct ext4_block_alloc_info {
43 43 /* information about reservation window */
44   - struct ext4_reserve_window_node rsv_window_node;
  44 + struct ext4_reserve_window_node rsv_window_node;
45 45 /*
46 46 * was i_next_alloc_block in ext4_inode_info
47 47 * is the logical (file-relative) number of the
48 48 * most-recently-allocated block in this file.
49 49 * We use this for detecting linearly ascending allocation requests.
50 50 */
51   - __u32 last_alloc_logical_block;
  51 + __u32 last_alloc_logical_block;
52 52 /*
53 53 * Was i_next_alloc_goal in ext4_inode_info
54 54 * is the *physical* companion to i_next_alloc_block.
... ... @@ -56,7 +56,7 @@
56 56 * allocated to this file. This give us the goal (target) for the next
57 57 * allocation when we detect linearly ascending requests.
58 58 */
59   - ext4_fsblk_t last_alloc_physical_block;
  59 + ext4_fsblk_t last_alloc_physical_block;
60 60 };
61 61  
62 62 #define rsv_start rsv_window._rsv_start
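The pair of fields commented above exists to detect linearly ascending allocations: when the next request is the last logical block plus one, the best physical goal is the last physical block plus one. A minimal sketch of that heuristic (find_goal and its fallback argument are illustrative, not the ext4 allocator):

#include <stdint.h>
#include <stdio.h>

struct alloc_info {
        uint32_t last_alloc_logical_block;
        uint64_t last_alloc_physical_block;
};

static uint64_t find_goal(const struct alloc_info *ai,
                          uint32_t logical, uint64_t fallback)
{
        /* linearly ascending request: aim right after the previous block */
        if (logical == ai->last_alloc_logical_block + 1)
                return ai->last_alloc_physical_block + 1;
        return fallback;        /* e.g. first free block of the group */
}

int main(void)
{
        struct alloc_info ai = {
                .last_alloc_logical_block  = 41,
                .last_alloc_physical_block = 90041,
        };

        printf("%llu\n", (unsigned long long)find_goal(&ai, 42, 0)); /* 90042 */
        return 0;
}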