Commit d0d856e8bd6e697cb44b2b4dd038f3bec576a70e

Authored by Randy Dunlap
Committed by Linus Torvalds
1 parent 471d4011a9

[PATCH] ext4: clean up comments in ext4-extents patch

Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 3 changed files with 157 additions and 127 deletions

fs/ext4/extents.c
... ... @@ -44,7 +44,10 @@
44 44 #include <asm/uaccess.h>
45 45  
46 46  
47   -/* this macro combines low and hi parts of phys. blocknr into ext4_fsblk_t */
  47 +/*
  48 + * ext_pblock:
  49 + * combine low and high parts of physical block number into ext4_fsblk_t
  50 + */
48 51 static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
49 52 {
50 53 ext4_fsblk_t block;
... ... @@ -55,7 +58,10 @@
55 58 return block;
56 59 }
57 60  
58   -/* this macro combines low and hi parts of phys. blocknr into ext4_fsblk_t */
  61 +/*
  62 + * idx_pblock:
  63 + * combine low and high parts of a leaf physical block number into ext4_fsblk_t
  64 + */
59 65 static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
60 66 {
61 67 ext4_fsblk_t block;
... ... @@ -66,7 +72,11 @@
66 72 return block;
67 73 }
68 74  
69   -/* the routine stores large phys. blocknr into extent breaking it into parts */
  75 +/*
  76 + * ext4_ext_store_pblock:
  77 + * stores a large physical block number into an extent struct,
  78 + * breaking it into parts
  79 + */
70 80 static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
71 81 {
72 82 ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
... ... @@ -74,7 +84,11 @@
74 84 ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
75 85 }
76 86  
77   -/* the routine stores large phys. blocknr into index breaking it into parts */
  87 +/*
  88 + * ext4_idx_store_pblock:
  89 + * stores a large physical block number into an index struct,
  90 + * breaking it into parts
  91 + */
78 92 static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
79 93 {
80 94 ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
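
The reworded comments above all describe the same packing scheme used by ext_pblock()/idx_pblock() and the two store helpers: a 48-bit physical block number is split into a 32-bit low field plus a 16-bit high field, and the two-step (pb >> 31) >> 1 shift avoids an undefined full-width shift when ext4_fsblk_t happens to be only 32 bits wide. A minimal userspace sketch of the round trip; the type and helper names below are invented for illustration and are not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef uint64_t fsblk_t;                    /* stand-in for ext4_fsblk_t */

struct pblock_parts {
	uint32_t lo;                         /* like ee_start / ei_leaf       */
	uint16_t hi;                         /* like ee_start_hi / ei_leaf_hi */
};

static void store_pblock(struct pblock_parts *p, fsblk_t pb)
{
	p->lo = (uint32_t)(pb & 0xffffffff);
	p->hi = (uint16_t)(((pb >> 31) >> 1) & 0xffff);
}

static fsblk_t load_pblock(const struct pblock_parts *p)
{
	return (((fsblk_t)p->hi << 31) << 1) | p->lo;
}

int main(void)
{
	fsblk_t pb = 0x0000123456789abcULL;  /* any 48-bit block number */
	struct pblock_parts p;

	store_pblock(&p, pb);
	assert(load_pblock(&p) == pb);       /* the split round-trips losslessly */
	printf("lo=0x%08x hi=0x%04x\n", p.lo, p.hi);
	return 0;
}
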
... ... @@ -179,8 +193,8 @@
179 193 if ((ex = path[depth].p_ext))
180 194 return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
181 195  
182   - /* it looks index is empty
183   - * try to find starting from index itself */
  196 + /* it looks like index is empty;
  197 + * try to find starting block from index itself */
184 198 if (path[depth].p_bh)
185 199 return path[depth].p_bh->b_blocknr;
186 200 }
... ... @@ -317,7 +331,8 @@
317 331 }
318 332  
319 333 /*
320   - * binary search for closest index by given block
  334 + * ext4_ext_binsearch_idx:
  335 + * binary search for the closest index of the given block
321 336 */
322 337 static void
323 338 ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
... ... @@ -375,7 +390,8 @@
375 390 }
376 391  
377 392 /*
378   - * binary search for closest extent by given block
  393 + * ext4_ext_binsearch:
  394 + * binary search for closest extent of the given block
379 395 */
380 396 static void
381 397 ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
... ... @@ -388,8 +404,8 @@
388 404  
389 405 if (eh->eh_entries == 0) {
390 406 /*
391   - * this leaf is empty yet:
392   - * we get such a leaf in split/add case
  407 + * this leaf is empty:
  408 + * we get such a leaf in split/add case
393 409 */
394 410 return;
395 411 }
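
ext4_ext_binsearch_idx() and ext4_ext_binsearch() both perform the search the new comments name explicitly: among entries sorted by starting logical block, pick the last one that starts at or before the requested block. A toy standalone version, with invented structure and function names, only to illustrate the idea:

#include <stdio.h>

struct toy_extent {
	unsigned int start;                  /* first logical block (ee_block) */
	unsigned int len;                    /* blocks covered (ee_len)        */
};

/* Return index of the rightmost extent with start <= block, or -1. */
static int find_closest(const struct toy_extent *ex, int nr, unsigned int block)
{
	int lo = 0, hi = nr - 1, found = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (ex[mid].start <= block) {
			found = mid;         /* candidate; keep looking right */
			lo = mid + 1;
		} else {
			hi = mid - 1;
		}
	}
	return found;
}

int main(void)
{
	struct toy_extent leaf[] = { { 0, 4 }, { 10, 2 }, { 20, 8 } };

	printf("block 12 -> entry %d\n", find_closest(leaf, 3, 12));   /* 1 */
	printf("block 25 -> entry %d\n", find_closest(leaf, 3, 25));   /* 2 */
	return 0;
}
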
... ... @@ -520,8 +536,9 @@
520 536 }
521 537  
522 538 /*
523   - * insert new index [logical;ptr] into the block at cupr
524   - * it check where to insert: before curp or after curp
  539 + * ext4_ext_insert_index:
  540 + * insert new index [@logical;@ptr] into the block at @curp;
  541 + * check where to insert: before @curp or after @curp
525 542 */
526 543 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
527 544 struct ext4_ext_path *curp,
... ... @@ -574,13 +591,14 @@
574 591 }
575 592  
576 593 /*
577   - * routine inserts new subtree into the path, using free index entry
578   - * at depth 'at:
579   - * - allocates all needed blocks (new leaf and all intermediate index blocks)
580   - * - makes decision where to split
581   - * - moves remaining extens and index entries (right to the split point)
582   - * into the newly allocated blocks
583   - * - initialize subtree
  594 + * ext4_ext_split:
  595 + * inserts new subtree into the path, using free index entry
  596 + * at depth @at:
  597 + * - allocates all needed blocks (new leaf and all intermediate index blocks)
  598 + * - makes decision where to split
  599 + * - moves remaining extents and index entries (right to the split point)
  600 + * into the newly allocated blocks
  601 + * - initializes subtree
584 602 */
585 603 static int ext4_ext_split(handle_t *handle, struct inode *inode,
586 604 struct ext4_ext_path *path,
587 605  
588 606  
... ... @@ -598,14 +616,14 @@
598 616 int err = 0;
599 617  
600 618 /* make decision: where to split? */
601   - /* FIXME: now desicion is simplest: at current extent */
  619 + /* FIXME: now decision is simplest: at current extent */
602 620  
603   - /* if current leaf will be splitted, then we should use
  621 + /* if current leaf will be split, then we should use
604 622 * border from split point */
605 623 BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
606 624 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
607 625 border = path[depth].p_ext[1].ee_block;
608   - ext_debug("leaf will be splitted."
  626 + ext_debug("leaf will be split."
609 627 " next leaf starts at %d\n",
610 628 le32_to_cpu(border));
611 629 } else {
612 630  
613 631  
... ... @@ -616,16 +634,16 @@
616 634 }
617 635  
618 636 /*
619   - * if error occurs, then we break processing
620   - * and turn filesystem read-only. so, index won't
  637 + * If error occurs, then we break processing
  638 + * and mark filesystem read-only. index won't
621 639 * be inserted and tree will be in consistent
622   - * state. next mount will repair buffers too
  640 + * state. Next mount will repair buffers too.
623 641 */
624 642  
625 643 /*
626   - * get array to track all allocated blocks
627   - * we need this to handle errors and free blocks
628   - * upon them
  644 + * Get array to track all allocated blocks.
  645 + * We need this to handle errors and free blocks
  646 + * upon them.
629 647 */
630 648 ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
631 649 if (!ablocks)
... ... @@ -661,7 +679,7 @@
661 679 neh->eh_depth = 0;
662 680 ex = EXT_FIRST_EXTENT(neh);
663 681  
664   - /* move remain of path[depth] to the new leaf */
  682 + /* move remainder of path[depth] to the new leaf */
665 683 BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
666 684 /* start copy from next extent */
667 685 /* TODO: we could do it by single memmove */
... ... @@ -813,11 +831,12 @@
813 831 }
814 832  
815 833 /*
816   - * routine implements tree growing procedure:
817   - * - allocates new block
818   - * - moves top-level data (index block or leaf) into the new block
819   - * - initialize new top-level, creating index that points to the
820   - * just created block
  834 + * ext4_ext_grow_indepth:
  835 + * implements tree growing procedure:
  836 + * - allocates new block
  837 + * - moves top-level data (index block or leaf) into the new block
  838 + * - initializes new top-level, creating index that points to the
  839 + * just created block
821 840 */
822 841 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
823 842 struct ext4_ext_path *path,
... ... @@ -892,8 +911,9 @@
892 911 }
893 912  
894 913 /*
895   - * routine finds empty index and adds new leaf. if no free index found
896   - * then it requests in-depth growing
  914 + * ext4_ext_create_new_leaf:
  915 + * finds empty index and adds new leaf.
  916 + * if no free index is found, then it requests in-depth growing.
897 917 */
898 918 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
899 919 struct ext4_ext_path *path,
... ... @@ -912,8 +932,8 @@
912 932 curp--;
913 933 }
914 934  
915   - /* we use already allocated block for index block
916   - * so, subsequent data blocks should be contigoues */
  935 + /* we use already allocated block for index block,
  936 + * so subsequent data blocks should be contiguous */
917 937 if (EXT_HAS_FREE_INDEX(curp)) {
918 938 /* if we found index with free entry, then use that
919 939 * entry: create all needed subtree and add new leaf */
920 940  
... ... @@ -943,12 +963,12 @@
943 963 }
944 964  
945 965 /*
946   - * only first (depth 0 -> 1) produces free space
947   - * in all other cases we have to split growed tree
  966 + * only first (depth 0 -> 1) produces free space;
  967 + * in all other cases we have to split the grown tree
948 968 */
949 969 depth = ext_depth(inode);
950 970 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
951   - /* now we need split */
  971 + /* now we need to split */
952 972 goto repeat;
953 973 }
954 974 }
... ... @@ -958,10 +978,11 @@
958 978 }
959 979  
960 980 /*
961   - * returns allocated block in subsequent extent or EXT_MAX_BLOCK
962   - * NOTE: it consider block number from index entry as
963   - * allocated block. thus, index entries have to be consistent
964   - * with leafs
  981 + * ext4_ext_next_allocated_block:
  982 + * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
  983 + * NOTE: it considers block number from index entry as
  984 + * allocated block. Thus, index entries have to be consistent
  985 + * with leaves.
965 986 */
966 987 static unsigned long
967 988 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
... ... @@ -993,6 +1014,7 @@
993 1014 }
994 1015  
995 1016 /*
  1017 + * ext4_ext_next_leaf_block:
996 1018 * returns first allocated block from next leaf or EXT_MAX_BLOCK
997 1019 */
998 1020 static unsigned ext4_ext_next_leaf_block(struct inode *inode,
... ... @@ -1021,8 +1043,9 @@
1021 1043 }
1022 1044  
1023 1045 /*
1024   - * if leaf gets modified and modified extent is first in the leaf
1025   - * then we have to correct all indexes above
  1046 + * ext4_ext_correct_indexes:
  1047 + * if leaf gets modified and modified extent is first in the leaf,
  1048 + * then we have to correct all indexes above.
1026 1049 * TODO: do we need to correct tree in all cases?
1027 1050 */
1028 1051 int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
... ... @@ -1050,7 +1073,7 @@
1050 1073 }
1051 1074  
1052 1075 /*
1053   - * TODO: we need correction if border is smaller then current one
  1076 + * TODO: we need correction if border is smaller than current one
1054 1077 */
1055 1078 k = depth - 1;
1056 1079 border = path[depth].p_ext->ee_block;
... ... @@ -1085,7 +1108,7 @@
1085 1108 /*
1086 1109 * To allow future support for preallocated extents to be added
1087 1110 * as an RO_COMPAT feature, refuse to merge to extents if
1088   - * can result in the top bit of ee_len being set
  1111 + * this can result in the top bit of ee_len being set.
1089 1112 */
1090 1113 if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
1091 1114 return 0;
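
The clarified comment explains the check in the surrounding context: the top bit of the 16-bit ee_len field is kept clear so a future RO_COMPAT preallocated-extent feature can use it, and two extents are therefore merged only when the summed length stays below that bit. A small sketch of that guard; TOY_MAX_LEN here is an assumption standing in for EXT_MAX_LEN:

#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_LEN ((1u << 15) - 1)    /* keep lengths below bit 15 */

/* Two extents may merge only if the summed length leaves the top bit clear. */
static int can_merge_len(uint16_t len1, uint16_t len2)
{
	return (uint32_t)len1 + len2 <= TOY_MAX_LEN;
}

int main(void)
{
	printf("%d\n", can_merge_len(20000, 10000));   /* 1: 30000 still fits */
	printf("%d\n", can_merge_len(20000, 20000));   /* 0: would set bit 15 */
	return 0;
}
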
... ... @@ -1100,9 +1123,10 @@
1100 1123 }
1101 1124  
1102 1125 /*
1103   - * this routine tries to merge requsted extent into the existing
1104   - * extent or inserts requested extent as new one into the tree,
1105   - * creating new leaf in no-space case
  1126 + * ext4_ext_insert_extent:
  1127 + * tries to merge requested extent into the existing extent or
  1128 + * inserts requested extent as new one into the tree,
  1129 + * creating new leaf in the no-space case.
1106 1130 */
1107 1131 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1108 1132 struct ext4_ext_path *path,
... ... @@ -1163,8 +1187,8 @@
1163 1187 }
1164 1188  
1165 1189 /*
1166   - * there is no free space in found leaf
1167   - * we're gonna add new leaf in the tree
  1190 + * There is no free space in the found leaf.
  1191 + * We're gonna add a new leaf in the tree.
1168 1192 */
1169 1193 err = ext4_ext_create_new_leaf(handle, inode, path, newext);
1170 1194 if (err)
... ... @@ -1377,7 +1401,8 @@
1377 1401 }
1378 1402  
1379 1403 /*
1380   - * this routine calculate boundaries of the gap requested block fits into
  1404 + * ext4_ext_put_gap_in_cache:
  1405 + * calculate boundaries of the gap that the requested block fits into
1381 1406 * and cache this gap
1382 1407 */
1383 1408 static inline void
... ... @@ -1452,9 +1477,10 @@
1452 1477 }
1453 1478  
1454 1479 /*
1455   - * routine removes index from the index block
1456   - * it's used in truncate case only. thus all requests are for
1457   - * last index in the block only
  1480 + * ext4_ext_rm_idx:
  1481 + * removes index from the index block.
  1482 + * It's used in truncate case only, thus all requests are for
  1483 + * last index in the block only.
1458 1484 */
1459 1485 int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1460 1486 struct ext4_ext_path *path)
1461 1487  
... ... @@ -1480,11 +1506,12 @@
1480 1506 }
1481 1507  
1482 1508 /*
1483   - * This routine returns max. credits extent tree can consume.
  1509 + * ext4_ext_calc_credits_for_insert:
  1510 + * This routine returns max. credits that the extent tree can consume.
1484 1511 * It should be OK for low-performance paths like ->writepage()
1485   - * To allow many writing process to fit a single transaction,
1486   - * caller should calculate credits under truncate_mutex and
1487   - * pass actual path.
  1512 + * To allow many writing processes to fit into a single transaction,
  1513 + * the caller should calculate credits under truncate_mutex and
  1514 + * pass the actual path.
1488 1515 */
1489 1516 int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
1490 1517 struct ext4_ext_path *path)
1491 1518  
... ... @@ -1500,9 +1527,9 @@
1500 1527 }
1501 1528  
1502 1529 /*
1503   - * given 32bit logical block (4294967296 blocks), max. tree
  1530 + * given 32-bit logical block (4294967296 blocks), max. tree
1504 1531 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
1505   - * let's also add one more level for imbalance.
  1532 + * Let's also add one more level for imbalance.
1506 1533 */
1507 1534 depth = 5;
1508 1535  
1509 1536  
... ... @@ -1510,13 +1537,13 @@
1510 1537 needed = 2;
1511 1538  
1512 1539 /*
1513   - * tree can be full, so it'd need to grow in depth:
  1540 + * tree can be full, so it would need to grow in depth:
1514 1541 * allocation + old root + new root
1515 1542 */
1516 1543 needed += 2 + 1 + 1;
1517 1544  
1518 1545 /*
1519   - * Index split can happen, we'd need:
  1546 + * Index split can happen, we would need:
1520 1547 * allocate intermediate indexes (bitmap + group)
1521 1548 * + change two blocks at each level, but root (already included)
1522 1549 */
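
Tallying the pieces these comments describe gives a rough feel for the worst-case estimate; this is only a reading of the comments in the hunk, not the exact arithmetic the function performs past the lines shown:

#include <stdio.h>

int main(void)
{
	int depth = 5;          /* 4 levels for 2^32 blocks + 1 for imbalance */
	int needed = 2;         /* base credits, value taken from the hunk above */

	/* tree can be full and grow in depth: allocation + old root + new root */
	needed += 2 + 1 + 1;

	/* index split per level: intermediate index allocation (bitmap + group)
	 * plus two changed blocks, with the root already counted above */
	needed += depth * (2 + 2);

	printf("rough worst-case credits: %d\n", needed);   /* 26 */
	return 0;
}
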
... ... @@ -1634,7 +1661,7 @@
1634 1661 BUG_ON(b != ex_ee_block + ex_ee_len - 1);
1635 1662 }
1636 1663  
1637   - /* at present, extent can't cross block group */
  1664 + /* at present, extent can't cross block group: */
1638 1665 /* leaf + bitmap + group desc + sb + inode */
1639 1666 credits = 5;
1640 1667 if (ex == EXT_FIRST_EXTENT(eh)) {
... ... @@ -1660,7 +1687,7 @@
1660 1687 goto out;
1661 1688  
1662 1689 if (num == 0) {
1663   - /* this extent is removed entirely mark slot unused */
  1690 + /* this extent is removed; mark slot entirely unused */
1664 1691 ext4_ext_store_pblock(ex, 0);
1665 1692 eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
1666 1693 }
... ... @@ -1692,7 +1719,8 @@
1692 1719 }
1693 1720  
1694 1721 /*
1695   - * returns 1 if current index have to be freed (even partial)
  1722 + * ext4_ext_more_to_rm:
  1723 + * returns 1 if current index has to be freed (even partial)
1696 1724 */
1697 1725 static int inline
1698 1726 ext4_ext_more_to_rm(struct ext4_ext_path *path)
... ... @@ -1703,7 +1731,7 @@
1703 1731 return 0;
1704 1732  
1705 1733 /*
1706   - * if truncate on deeper level happened it it wasn't partial
  1734 + * if truncate on deeper level happened, it wasn't partial,
1707 1735 * so we have to consider current index for truncation
1708 1736 */
1709 1737 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
... ... @@ -1729,8 +1757,8 @@
1729 1757 ext4_ext_invalidate_cache(inode);
1730 1758  
1731 1759 /*
1732   - * we start scanning from right side freeing all the blocks
1733   - * after i_size and walking into the deep
  1760 + * We start scanning from right side, freeing all the blocks
  1761 + * after i_size and walking into the tree depth-wise.
1734 1762 */
1735 1763 path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
1736 1764 if (path == NULL) {
... ... @@ -1749,7 +1777,7 @@
1749 1777 if (i == depth) {
1750 1778 /* this is leaf block */
1751 1779 err = ext4_ext_rm_leaf(handle, inode, path, start);
1752   - /* root level have p_bh == NULL, brelse() eats this */
  1780 + /* root level has p_bh == NULL, brelse() eats this */
1753 1781 brelse(path[i].p_bh);
1754 1782 path[i].p_bh = NULL;
1755 1783 i--;
1756 1784  
... ... @@ -1772,14 +1800,14 @@
1772 1800 BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
1773 1801  
1774 1802 if (!path[i].p_idx) {
1775   - /* this level hasn't touched yet */
  1803 + /* this level hasn't been touched yet */
1776 1804 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1777 1805 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
1778 1806 ext_debug("init index ptr: hdr 0x%p, num %d\n",
1779 1807 path[i].p_hdr,
1780 1808 le16_to_cpu(path[i].p_hdr->eh_entries));
1781 1809 } else {
1782   - /* we've already was here, see at next index */
  1810 + /* we were already here, see at next index */
1783 1811 path[i].p_idx--;
1784 1812 }
1785 1813  
1786 1814  
1787 1815  
1788 1816  
... ... @@ -1799,19 +1827,19 @@
1799 1827 break;
1800 1828 }
1801 1829  
1802   - /* put actual number of indexes to know is this
1803   - * number got changed at the next iteration */
  1830 + /* save actual number of indexes since this
  1831 + * number is changed at the next iteration */
1804 1832 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
1805 1833 i++;
1806 1834 } else {
1807   - /* we finish processing this index, go up */
  1835 + /* we finished processing this index, go up */
1808 1836 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
1809   - /* index is empty, remove it
  1837 + /* index is empty, remove it;
1810 1838 * handle must be already prepared by the
1811 1839 * truncatei_leaf() */
1812 1840 err = ext4_ext_rm_idx(handle, inode, path + i);
1813 1841 }
1814   - /* root level have p_bh == NULL, brelse() eats this */
  1842 + /* root level has p_bh == NULL, brelse() eats this */
1815 1843 brelse(path[i].p_bh);
1816 1844 path[i].p_bh = NULL;
1817 1845 i--;
... ... @@ -1822,8 +1850,8 @@
1822 1850 /* TODO: flexible tree reduction should be here */
1823 1851 if (path->p_hdr->eh_entries == 0) {
1824 1852 /*
1825   - * truncate to zero freed all the tree
1826   - * so, we need to correct eh_depth
  1853 + * truncate to zero freed all the tree,
  1854 + * so we need to correct eh_depth
1827 1855 */
1828 1856 err = ext4_ext_get_access(handle, inode, path);
1829 1857 if (err == 0) {
... ... @@ -1912,7 +1940,7 @@
1912 1940 if (goal == EXT4_EXT_CACHE_GAP) {
1913 1941 if (!create) {
1914 1942 /* block isn't allocated yet and
1915   - * user don't want to allocate it */
  1943 + * user doesn't want to allocate it */
1916 1944 goto out2;
1917 1945 }
1918 1946 /* we should allocate requested block */
... ... @@ -1921,7 +1949,7 @@
1921 1949 newblock = iblock
1922 1950 - le32_to_cpu(newex.ee_block)
1923 1951 + ext_pblock(&newex);
1924   - /* number of remain blocks in the extent */
  1952 + /* number of remaining blocks in the extent */
1925 1953 allocated = le16_to_cpu(newex.ee_len) -
1926 1954 (iblock - le32_to_cpu(newex.ee_block));
1927 1955 goto out;
... ... @@ -1941,8 +1969,8 @@
1941 1969 depth = ext_depth(inode);
1942 1970  
1943 1971 /*
1944   - * consistent leaf must not be empty
1945   - * this situations is possible, though, _during_ tree modification
  1972 + * consistent leaf must not be empty;
  1973 + * this situation is possible, though, _during_ tree modification;
1946 1974 * this is why assert can't be put in ext4_ext_find_extent()
1947 1975 */
1948 1976 BUG_ON(path[depth].p_ext == NULL && depth != 0);
1949 1977  
... ... @@ -1960,10 +1988,10 @@
1960 1988 */
1961 1989 if (ee_len > EXT_MAX_LEN)
1962 1990 goto out2;
1963   - /* if found exent covers block, simple return it */
  1991 + /* if found extent covers block, simply return it */
1964 1992 if (iblock >= ee_block && iblock < ee_block + ee_len) {
1965 1993 newblock = iblock - ee_block + ee_start;
1966   - /* number of remain blocks in the extent */
  1994 + /* number of remaining blocks in the extent */
1967 1995 allocated = ee_len - (iblock - ee_block);
1968 1996 ext_debug("%d fit into %lu:%d -> "E3FSBLK"\n", (int) iblock,
1969 1997 ee_block, ee_len, newblock);
1970 1998  
1971 1999  
... ... @@ -1974,17 +2002,18 @@
1974 2002 }
1975 2003  
1976 2004 /*
1977   - * requested block isn't allocated yet
  2005 + * requested block isn't allocated yet;
1978 2006 * we couldn't try to create block if create flag is zero
1979 2007 */
1980 2008 if (!create) {
1981   - /* put just found gap into cache to speedup subsequest reqs */
  2009 + /* put just found gap into cache to speed up
  2010 + * subsequent requests */
1982 2011 ext4_ext_put_gap_in_cache(inode, path, iblock);
1983 2012 goto out2;
1984 2013 }
1985 2014 /*
1986 2015 * Okay, we need to do block allocation. Lazily initialize the block
1987   - * allocation info here if necessary
  2016 + * allocation info here if necessary.
1988 2017 */
1989 2018 if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
1990 2019 ext4_init_block_alloc_info(inode);
... ... @@ -2062,9 +2091,9 @@
2062 2091 ext4_ext_invalidate_cache(inode);
2063 2092  
2064 2093 /*
2065   - * TODO: optimization is possible here
2066   - * probably we need not scaning at all,
2067   - * because page truncation is enough
  2094 + * TODO: optimization is possible here.
  2095 + * Probably we need not scan at all,
  2096 + * because page truncation is enough.
2068 2097 */
2069 2098 if (ext4_orphan_add(handle, inode))
2070 2099 goto out_stop;
2071 2100  
... ... @@ -2078,13 +2107,13 @@
2078 2107 err = ext4_ext_remove_space(inode, last_block);
2079 2108  
2080 2109 /* In a multi-transaction truncate, we only make the final
2081   - * transaction synchronous */
  2110 + * transaction synchronous. */
2082 2111 if (IS_SYNC(inode))
2083 2112 handle->h_sync = 1;
2084 2113  
2085 2114 out_stop:
2086 2115 /*
2087   - * If this was a simple ftruncate(), and the file will remain alive
  2116 + * If this was a simple ftruncate() and the file will remain alive,
2088 2117 * then we need to clear up the orphan record which we created above.
2089 2118 * However, if this was a real unlink then we were called by
2090 2119 * ext4_delete_inode(), and we allow that function to clean up the
... ... @@ -2098,7 +2127,8 @@
2098 2127 }
2099 2128  
2100 2129 /*
2101   - * this routine calculate max number of blocks we could modify
  2130 + * ext4_ext_writepage_trans_blocks:
  2131 + * calculate max number of blocks we could modify
2102 2132 * in order to allocate new block for an inode
2103 2133 */
2104 2134 int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
... ... @@ -2107,7 +2137,7 @@
2107 2137  
2108 2138 needed = ext4_ext_calc_credits_for_insert(inode, NULL);
2109 2139  
2110   - /* caller want to allocate num blocks, but note it includes sb */
  2140 + /* caller wants to allocate num blocks, but note it includes sb */
2111 2141 needed = needed * num - (num - 1);
2112 2142  
2113 2143 #ifdef CONFIG_QUOTA
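
The fixed comment ("caller wants to allocate num blocks, but note it includes sb") refers to the needed * num - (num - 1) adjustment in the context line just above: the per-insert credit estimate already counts the superblock once, and that single shared block must not be charged num times. A tiny worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	int per_insert = 26;    /* hypothetical credits for one insert, incl. sb */
	int num = 4;            /* blocks the caller wants to allocate           */

	/* the shared superblock credit is charged once, not num times */
	int total = per_insert * num - (num - 1);

	printf("credits for %d blocks: %d\n", num, total);  /* 101 */
	return 0;
}
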
include/linux/ext4_fs_extents.h
... ... @@ -22,29 +22,29 @@
22 22 #include <linux/ext4_fs.h>
23 23  
24 24 /*
25   - * with AGRESSIVE_TEST defined capacity of index/leaf blocks
26   - * become very little, so index split, in-depth growing and
27   - * other hard changes happens much more often
28   - * this is for debug purposes only
  25 + * With AGRESSIVE_TEST defined, the capacity of index/leaf blocks
  26 + * becomes very small, so index split, in-depth growing and
  27 + * other hard changes happen much more often.
  28 + * This is for debug purposes only.
29 29 */
30 30 #define AGRESSIVE_TEST_
31 31  
32 32 /*
33   - * with EXTENTS_STATS defined number of blocks and extents
34   - * are collected in truncate path. they'll be showed at
35   - * umount time
  33 + * With EXTENTS_STATS defined, the number of blocks and extents
  34 + * are collected in the truncate path. They'll be shown at
  35 + * umount time.
36 36 */
37 37 #define EXTENTS_STATS__
38 38  
39 39 /*
40   - * if CHECK_BINSEARCH defined, then results of binary search
41   - * will be checked by linear search
  40 + * If CHECK_BINSEARCH is defined, then the results of the binary search
  41 + * will also be checked by linear search.
42 42 */
43 43 #define CHECK_BINSEARCH__
44 44  
45 45 /*
46   - * if EXT_DEBUG is defined you can use 'extdebug' mount option
47   - * to get lots of info what's going on
  46 + * If EXT_DEBUG is defined you can use the 'extdebug' mount option
  47 + * to get lots of info about what's going on.
48 48 */
49 49 #define EXT_DEBUG__
50 50 #ifdef EXT_DEBUG
51 51  
52 52  
53 53  
54 54  
55 55  
56 56  
57 57  
58 58  
... ... @@ -54,58 +54,58 @@
54 54 #endif
55 55  
56 56 /*
57   - * if EXT_STATS is defined then stats numbers are collected
58   - * these number will be displayed at umount time
  57 + * If EXT_STATS is defined then stats numbers are collected.
  58 + * These numbers will be displayed at umount time.
59 59 */
60 60 #define EXT_STATS_
61 61  
62 62  
63 63 /*
64   - * ext4_inode has i_block array (60 bytes total)
65   - * first 12 bytes store ext4_extent_header
66   - * the remain stores array of ext4_extent
  64 + * ext4_inode has i_block array (60 bytes total).
  65 + * The first 12 bytes store ext4_extent_header;
  66 + * the remainder stores an array of ext4_extent.
67 67 */
68 68  
69 69 /*
70   - * this is extent on-disk structure
71   - * it's used at the bottom of the tree
  70 + * This is the extent on-disk structure.
  71 + * It's used at the bottom of the tree.
72 72 */
73 73 struct ext4_extent {
74 74 __le32 ee_block; /* first logical block extent covers */
75 75 __le16 ee_len; /* number of blocks covered by extent */
76 76 __le16 ee_start_hi; /* high 16 bits of physical block */
77   - __le32 ee_start; /* low 32 bigs of physical block */
  77 + __le32 ee_start; /* low 32 bits of physical block */
78 78 };
79 79  
80 80 /*
81   - * this is index on-disk structure
82   - * it's used at all the levels, but the bottom
  81 + * This is index on-disk structure.
  82 + * It's used at all the levels except the bottom.
83 83 */
84 84 struct ext4_extent_idx {
85 85 __le32 ei_block; /* index covers logical blocks from 'block' */
86 86 __le32 ei_leaf; /* pointer to the physical block of the next *
87   - * level. leaf or next index could bet here */
  87 + * level. leaf or next index could be there */
88 88 __le16 ei_leaf_hi; /* high 16 bits of physical block */
89 89 __u16 ei_unused;
90 90 };
91 91  
92 92 /*
93   - * each block (leaves and indexes), even inode-stored has header
  93 + * Each block (leaves and indexes), even the inode-stored one, has a header.
94 94 */
95 95 struct ext4_extent_header {
96 96 __le16 eh_magic; /* probably will support different formats */
97 97 __le16 eh_entries; /* number of valid entries */
98 98 __le16 eh_max; /* capacity of store in entries */
99   - __le16 eh_depth; /* has tree real underlaying blocks? */
  99 + __le16 eh_depth; /* has tree real underlying blocks? */
100 100 __le32 eh_generation; /* generation of the tree */
101 101 };
102 102  
103 103 #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
104 104  
105 105 /*
106   - * array of ext4_ext_path contains path to some extent
107   - * creation/lookup routines use it for traversal/splitting/etc
108   - * truncate uses it to simulate recursive walking
  106 + * Array of ext4_ext_path contains path to some extent.
  107 + * Creation/lookup routines use it for traversal/splitting/etc.
  108 + * Truncate uses it to simulate recursive walking.
109 109 */
110 110 struct ext4_ext_path {
111 111 ext4_fsblk_t p_block;
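
The reworded i_block comment can be sanity-checked with a quick size calculation: a 12-byte header plus 12-byte entries inside the 60-byte i_block area leaves room for exactly four extents (or four indexes) in the inode-resident root. A userspace sketch with stand-in fixed-width types rather than the kernel's __le fields:

#include <stdint.h>
#include <stdio.h>

struct toy_extent_header {              /* mirrors ext4_extent_header */
	uint16_t eh_magic;
	uint16_t eh_entries;
	uint16_t eh_max;
	uint16_t eh_depth;
	uint32_t eh_generation;
};

struct toy_extent {                     /* mirrors ext4_extent */
	uint32_t ee_block;
	uint16_t ee_len;
	uint16_t ee_start_hi;
	uint32_t ee_start;
};

int main(void)
{
	size_t i_block_bytes = 60;
	size_t in_root = (i_block_bytes - sizeof(struct toy_extent_header))
			 / sizeof(struct toy_extent);

	printf("header %zu bytes, extent %zu bytes, %zu extents fit in i_block\n",
	       sizeof(struct toy_extent_header), sizeof(struct toy_extent),
	       in_root);                    /* 12, 12, 4 */
	return 0;
}
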
include/linux/ext4_jbd2.h
... ... @@ -28,8 +28,8 @@
28 28 * indirection blocks, the group and superblock summaries, and the data
29 29 * block to complete the transaction.
30 30 *
31   - * For extents-enabled fs we may have to allocate and modify upto
32   - * 5 levels of tree + root which is stored in inode. */
  31 + * For extents-enabled fs we may have to allocate and modify up to
  32 + * 5 levels of tree + root which are stored in the inode. */
33 33  
34 34 #define EXT4_SINGLEDATA_TRANS_BLOCKS(sb) \
35 35 (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS) \