Commit 51eaaa677691f8da526ce5a3d89e08ee2d2669ce

Authored by Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: pre-allocate bulk-read buffer
  UBIFS: do not allocate too much
  UBIFS: do not print scary memory allocation warnings
  UBIFS: allow for gaps when dirtying the LPT
  UBIFS: fix compilation warnings
  MAINTAINERS: change UBI/UBIFS git tree URLs
  UBIFS: endian handling fixes and annotations
  UBIFS: remove printk

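The bulk-read patches in this pull ("pre-allocate bulk-read buffer", "do not allocate too much", "do not print scary memory allocation warnings") share one design: a single bulk-read buffer of up to c->max_bu_buf_len bytes is allocated once, when bulk-read is enabled, and shared under c->bu_mutex; a temporary bu_info is kmalloc()ed (with __GFP_NOWARN, so failures stay quiet) only when that mutex is contended. A condensed sketch of the per-readpage path, pieced together from the fs/ubifs/file.c hunks below with error handling trimmed:

    struct bu_info *bu;
    int err, allocated = 0;

    if (mutex_trylock(&c->bu_mutex))
            bu = &c->bu;            /* fast path: shared, pre-allocated buffer */
    else {
            /* bu_mutex is busy - fall back to a temporary bu_info */
            bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
            if (!bu)
                    goto out_unlock;        /* skip bulk-read this time */
            bu->buf = NULL;         /* data buffer is allocated later, sized to fit */
            allocated = 1;
    }

    bu->buf_len = c->max_bu_buf_len;
    data_key_init(c, &bu->key, inode->i_ino,
                  page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
    err = ubifs_do_bulk_read(c, bu, page);

    if (!allocated)
            mutex_unlock(&c->bu_mutex);
    else
            kfree(bu);

If neither the shared buffer nor a temporary one can be obtained, bulk-read is simply skipped: it is an optimization, not a correctness requirement.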
Showing 15 changed files

MAINTAINERS
... ... @@ -4236,7 +4236,7 @@
4236 4236 P: Adrian Hunter
4237 4237 M: ext-adrian.hunter@nokia.com
4238 4238 L: linux-mtd@lists.infradead.org
4239   -T: git git://git.infradead.org/~dedekind/ubifs-2.6.git
  4239 +T: git git://git.infradead.org/ubifs-2.6.git
4240 4240 W: http://www.linux-mtd.infradead.org/doc/ubifs.html
4241 4241 S: Maintained
4242 4242  
... ... @@ -4290,7 +4290,7 @@
4290 4290 M: dedekind@infradead.org
4291 4291 W: http://www.linux-mtd.infradead.org/
4292 4292 L: linux-mtd@lists.infradead.org
4293   -T: git git://git.infradead.org/~dedekind/ubi-2.6.git
  4293 +T: git git://git.infradead.org/ubi-2.6.git
4294 4294 S: Maintained
4295 4295  
4296 4296 USB ACM DRIVER
fs/ubifs/commit.c
... ... @@ -234,8 +234,8 @@
234 234 int err;
235 235 struct ubifs_info *c = info;
236 236  
237   - ubifs_msg("background thread \"%s\" started, PID %d",
238   - c->bgt_name, current->pid);
  237 + dbg_msg("background thread \"%s\" started, PID %d",
  238 + c->bgt_name, current->pid);
239 239 set_freezable();
240 240  
241 241 while (1) {
fs/ubifs/debug.c
... ... @@ -101,21 +101,24 @@
101 101 if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
102 102 switch (type) {
103 103 case UBIFS_INO_KEY:
104   - sprintf(p, "(%lu, %s)", key_inum(c, key),
  104 + sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
105 105 get_key_type(type));
106 106 break;
107 107 case UBIFS_DENT_KEY:
108 108 case UBIFS_XENT_KEY:
109   - sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key),
  109 + sprintf(p, "(%lu, %s, %#08x)",
  110 + (unsigned long)key_inum(c, key),
110 111 get_key_type(type), key_hash(c, key));
111 112 break;
112 113 case UBIFS_DATA_KEY:
113   - sprintf(p, "(%lu, %s, %u)", key_inum(c, key),
  114 + sprintf(p, "(%lu, %s, %u)",
  115 + (unsigned long)key_inum(c, key),
114 116 get_key_type(type), key_block(c, key));
115 117 break;
116 118 case UBIFS_TRUN_KEY:
117 119 sprintf(p, "(%lu, %s)",
118   - key_inum(c, key), get_key_type(type));
  120 + (unsigned long)key_inum(c, key),
  121 + get_key_type(type));
119 122 break;
120 123 default:
121 124 sprintf(p, "(bad key type: %#08x, %#08x)",
... ... @@ -364,8 +367,8 @@
364 367 le32_to_cpu(mst->ihead_lnum));
365 368 printk(KERN_DEBUG "\tihead_offs %u\n",
366 369 le32_to_cpu(mst->ihead_offs));
367   - printk(KERN_DEBUG "\tindex_size %u\n",
368   - le32_to_cpu(mst->index_size));
  370 + printk(KERN_DEBUG "\tindex_size %llu\n",
  371 + (unsigned long long)le64_to_cpu(mst->index_size));
369 372 printk(KERN_DEBUG "\tlpt_lnum %u\n",
370 373 le32_to_cpu(mst->lpt_lnum));
371 374 printk(KERN_DEBUG "\tlpt_offs %u\n",
... ... @@ -1589,7 +1592,7 @@
1589 1592  
1590 1593 if (inum > c->highest_inum) {
1591 1594 ubifs_err("too high inode number, max. is %lu",
1592   - c->highest_inum);
  1595 + (unsigned long)c->highest_inum);
1593 1596 return ERR_PTR(-EINVAL);
1594 1597 }
1595 1598  
1596 1599  
1597 1600  
... ... @@ -1668,16 +1671,18 @@
1668 1671 ino_key_init(c, &key, inum);
1669 1672 err = ubifs_lookup_level0(c, &key, &znode, &n);
1670 1673 if (!err) {
1671   - ubifs_err("inode %lu not found in index", inum);
  1674 + ubifs_err("inode %lu not found in index", (unsigned long)inum);
1672 1675 return ERR_PTR(-ENOENT);
1673 1676 } else if (err < 0) {
1674   - ubifs_err("error %d while looking up inode %lu", err, inum);
  1677 + ubifs_err("error %d while looking up inode %lu",
  1678 + err, (unsigned long)inum);
1675 1679 return ERR_PTR(err);
1676 1680 }
1677 1681  
1678 1682 zbr = &znode->zbranch[n];
1679 1683 if (zbr->len < UBIFS_INO_NODE_SZ) {
1680   - ubifs_err("bad node %lu node length %d", inum, zbr->len);
  1684 + ubifs_err("bad node %lu node length %d",
  1685 + (unsigned long)inum, zbr->len);
1681 1686 return ERR_PTR(-EINVAL);
1682 1687 }
1683 1688  
... ... @@ -1697,7 +1702,7 @@
1697 1702 kfree(ino);
1698 1703 if (IS_ERR(fscki)) {
1699 1704 ubifs_err("error %ld while adding inode %lu node",
1700   - PTR_ERR(fscki), inum);
  1705 + PTR_ERR(fscki), (unsigned long)inum);
1701 1706 return fscki;
1702 1707 }
1703 1708  
... ... @@ -1786,7 +1791,8 @@
1786 1791 if (IS_ERR(fscki)) {
1787 1792 err = PTR_ERR(fscki);
1788 1793 ubifs_err("error %d while processing data node and "
1789   - "trying to find inode node %lu", err, inum);
  1794 + "trying to find inode node %lu",
  1795 + err, (unsigned long)inum);
1790 1796 goto out_dump;
1791 1797 }
1792 1798  
... ... @@ -1819,7 +1825,8 @@
1819 1825 if (IS_ERR(fscki)) {
1820 1826 err = PTR_ERR(fscki);
1821 1827 ubifs_err("error %d while processing entry node and "
1822   - "trying to find inode node %lu", err, inum);
  1828 + "trying to find inode node %lu",
  1829 + err, (unsigned long)inum);
1823 1830 goto out_dump;
1824 1831 }
1825 1832  
... ... @@ -1832,7 +1839,7 @@
1832 1839 err = PTR_ERR(fscki);
1833 1840 ubifs_err("error %d while processing entry node and "
1834 1841 "trying to find parent inode node %lu",
1835   - err, inum);
  1842 + err, (unsigned long)inum);
1836 1843 goto out_dump;
1837 1844 }
1838 1845  
... ... @@ -1923,7 +1930,8 @@
1923 1930 fscki->references != 1) {
1924 1931 ubifs_err("directory inode %lu has %d "
1925 1932 "direntries which refer it, but "
1926   - "should be 1", fscki->inum,
  1933 + "should be 1",
  1934 + (unsigned long)fscki->inum,
1927 1935 fscki->references);
1928 1936 goto out_dump;
1929 1937 }
1930 1938  
1931 1939  
1932 1940  
... ... @@ -1931,27 +1939,29 @@
1931 1939 fscki->references != 0) {
1932 1940 ubifs_err("root inode %lu has non-zero (%d) "
1933 1941 "direntries which refer it",
1934   - fscki->inum, fscki->references);
  1942 + (unsigned long)fscki->inum,
  1943 + fscki->references);
1935 1944 goto out_dump;
1936 1945 }
1937 1946 if (fscki->calc_sz != fscki->size) {
1938 1947 ubifs_err("directory inode %lu size is %lld, "
1939 1948 "but calculated size is %lld",
1940   - fscki->inum, fscki->size,
1941   - fscki->calc_sz);
  1949 + (unsigned long)fscki->inum,
  1950 + fscki->size, fscki->calc_sz);
1942 1951 goto out_dump;
1943 1952 }
1944 1953 if (fscki->calc_cnt != fscki->nlink) {
1945 1954 ubifs_err("directory inode %lu nlink is %d, "
1946 1955 "but calculated nlink is %d",
1947   - fscki->inum, fscki->nlink,
1948   - fscki->calc_cnt);
  1956 + (unsigned long)fscki->inum,
  1957 + fscki->nlink, fscki->calc_cnt);
1949 1958 goto out_dump;
1950 1959 }
1951 1960 } else {
1952 1961 if (fscki->references != fscki->nlink) {
1953 1962 ubifs_err("inode %lu nlink is %d, but "
1954   - "calculated nlink is %d", fscki->inum,
  1963 + "calculated nlink is %d",
  1964 + (unsigned long)fscki->inum,
1955 1965 fscki->nlink, fscki->references);
1956 1966 goto out_dump;
1957 1967 }
1958 1968  
1959 1969  
... ... @@ -1959,20 +1969,21 @@
1959 1969 if (fscki->xattr_sz != fscki->calc_xsz) {
1960 1970 ubifs_err("inode %lu has xattr size %u, but "
1961 1971 "calculated size is %lld",
1962   - fscki->inum, fscki->xattr_sz,
  1972 + (unsigned long)fscki->inum, fscki->xattr_sz,
1963 1973 fscki->calc_xsz);
1964 1974 goto out_dump;
1965 1975 }
1966 1976 if (fscki->xattr_cnt != fscki->calc_xcnt) {
1967 1977 ubifs_err("inode %lu has %u xattrs, but "
1968   - "calculated count is %lld", fscki->inum,
  1978 + "calculated count is %lld",
  1979 + (unsigned long)fscki->inum,
1969 1980 fscki->xattr_cnt, fscki->calc_xcnt);
1970 1981 goto out_dump;
1971 1982 }
1972 1983 if (fscki->xattr_nms != fscki->calc_xnms) {
1973 1984 ubifs_err("inode %lu has xattr names' size %u, but "
1974 1985 "calculated names' size is %lld",
1975   - fscki->inum, fscki->xattr_nms,
  1986 + (unsigned long)fscki->inum, fscki->xattr_nms,
1976 1987 fscki->calc_xnms);
1977 1988 goto out_dump;
1978 1989 }
1979 1990  
... ... @@ -1985,11 +1996,12 @@
1985 1996 ino_key_init(c, &key, fscki->inum);
1986 1997 err = ubifs_lookup_level0(c, &key, &znode, &n);
1987 1998 if (!err) {
1988   - ubifs_err("inode %lu not found in index", fscki->inum);
  1999 + ubifs_err("inode %lu not found in index",
  2000 + (unsigned long)fscki->inum);
1989 2001 return -ENOENT;
1990 2002 } else if (err < 0) {
1991 2003 ubifs_err("error %d while looking up inode %lu",
1992   - err, fscki->inum);
  2004 + err, (unsigned long)fscki->inum);
1993 2005 return err;
1994 2006 }
1995 2007  
... ... @@ -2007,7 +2019,7 @@
2007 2019 }
2008 2020  
2009 2021 ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
2010   - fscki->inum, zbr->lnum, zbr->offs);
  2022 + (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
2011 2023 dbg_dump_node(c, ino);
2012 2024 kfree(ino);
2013 2025 return -EINVAL;
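Nearly all of the fs/ubifs/debug.c changes above (and many of the changes further down) come from "UBIFS: fix compilation warnings": on some architectures and configurations ino_t is not 'unsigned long', so passing it straight to a "%lu" format string makes the compiler complain. The patch fixes every call site the same way; a one-line illustration of the pattern (not a quote of any particular hunk):

    /* On some architectures ino_t is not 'unsigned long', so cast explicitly
     * to keep the value in sync with the "%lu" format specifier. */
    ubifs_err("inode %lu not found in index", (unsigned long)inum);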
fs/ubifs/dir.c
... ... @@ -161,7 +161,7 @@
161 161 return ERR_PTR(-EINVAL);
162 162 }
163 163 ubifs_warn("running out of inode numbers (current %lu, max %d)",
164   - c->highest_inum, INUM_WATERMARK);
  164 + (unsigned long)c->highest_inum, INUM_WATERMARK);
165 165 }
166 166  
167 167 inode->i_ino = ++c->highest_inum;
... ... @@ -428,7 +428,8 @@
428 428 dbg_gen("feed '%s', ino %llu, new f_pos %#x",
429 429 dent->name, (unsigned long long)le64_to_cpu(dent->inum),
430 430 key_hash_flash(c, &dent->key));
431   - ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum);
  431 + ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
  432 + ubifs_inode(dir)->creat_sqnum);
432 433  
433 434 nm.len = le16_to_cpu(dent->nlen);
434 435 over = filldir(dirent, dent->name, nm.len, file->f_pos,
fs/ubifs/file.c
... ... @@ -72,7 +72,7 @@
72 72 return err;
73 73 }
74 74  
75   - ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
  75 + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
76 76  
77 77 len = le32_to_cpu(dn->size);
78 78 if (len <= 0 || len > UBIFS_BLOCK_SIZE)
... ... @@ -626,7 +626,7 @@
626 626  
627 627 dn = bu->buf + (bu->zbranch[nn].offs - offs);
628 628  
629   - ubifs_assert(dn->ch.sqnum >
  629 + ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
630 630 ubifs_inode(inode)->creat_sqnum);
631 631  
632 632 len = le32_to_cpu(dn->size);
633 633  
634 634  
635 635  
636 636  
... ... @@ -691,32 +691,22 @@
691 691 /**
692 692 * ubifs_do_bulk_read - do bulk-read.
693 693 * @c: UBIFS file-system description object
694   - * @page1: first page
  694 + * @bu: bulk-read information
  695 + * @page1: first page to read
695 696 *
696 697 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
697 698 */
698   -static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
  699 +static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
  700 + struct page *page1)
699 701 {
700 702 pgoff_t offset = page1->index, end_index;
701 703 struct address_space *mapping = page1->mapping;
702 704 struct inode *inode = mapping->host;
703 705 struct ubifs_inode *ui = ubifs_inode(inode);
704   - struct bu_info *bu;
705 706 int err, page_idx, page_cnt, ret = 0, n = 0;
  707 + int allocate = bu->buf ? 0 : 1;
706 708 loff_t isize;
707 709  
708   - bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
709   - if (!bu)
710   - return 0;
711   -
712   - bu->buf_len = c->bulk_read_buf_size;
713   - bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
714   - if (!bu->buf)
715   - goto out_free;
716   -
717   - data_key_init(c, &bu->key, inode->i_ino,
718   - offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
719   -
720 710 err = ubifs_tnc_get_bu_keys(c, bu);
721 711 if (err)
722 712 goto out_warn;
723 713  
... ... @@ -735,12 +725,25 @@
735 725 * together. If all the pages were like this, bulk-read would
736 726 * reduce performance, so we turn it off for a while.
737 727 */
738   - ui->read_in_a_row = 0;
739   - ui->bulk_read = 0;
740   - goto out_free;
  728 + goto out_bu_off;
741 729 }
742 730  
743 731 if (bu->cnt) {
  732 + if (allocate) {
  733 + /*
  734 + * Allocate bulk-read buffer depending on how many data
  735 + * nodes we are going to read.
  736 + */
  737 + bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
  738 + bu->zbranch[bu->cnt - 1].len -
  739 + bu->zbranch[0].offs;
  740 + ubifs_assert(bu->buf_len > 0);
  741 + ubifs_assert(bu->buf_len <= c->leb_size);
  742 + bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
  743 + if (!bu->buf)
  744 + goto out_bu_off;
  745 + }
  746 +
744 747 err = ubifs_tnc_bulk_read(c, bu);
745 748 if (err)
746 749 goto out_warn;
747 750  
... ... @@ -779,13 +782,17 @@
779 782 ui->last_page_read = offset + page_idx - 1;
780 783  
781 784 out_free:
782   - kfree(bu->buf);
783   - kfree(bu);
  785 + if (allocate)
  786 + kfree(bu->buf);
784 787 return ret;
785 788  
786 789 out_warn:
787 790 ubifs_warn("ignoring error %d and skipping bulk-read", err);
788 791 goto out_free;
  792 +
  793 +out_bu_off:
  794 + ui->read_in_a_row = ui->bulk_read = 0;
  795 + goto out_free;
789 796 }
790 797  
791 798 /**
792 799  
793 800  
794 801  
795 802  
... ... @@ -803,18 +810,20 @@
803 810 struct ubifs_info *c = inode->i_sb->s_fs_info;
804 811 struct ubifs_inode *ui = ubifs_inode(inode);
805 812 pgoff_t index = page->index, last_page_read = ui->last_page_read;
806   - int ret = 0;
  813 + struct bu_info *bu;
  814 + int err = 0, allocated = 0;
807 815  
808 816 ui->last_page_read = index;
809   -
810 817 if (!c->bulk_read)
811 818 return 0;
  819 +
812 820 /*
813   - * Bulk-read is protected by ui_mutex, but it is an optimization, so
814   - * don't bother if we cannot lock the mutex.
  821 + * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
  822 + * so don't bother if we cannot lock the mutex.
815 823 */
816 824 if (!mutex_trylock(&ui->ui_mutex))
817 825 return 0;
  826 +
818 827 if (index != last_page_read + 1) {
819 828 /* Turn off bulk-read if we stop reading sequentially */
820 829 ui->read_in_a_row = 1;
... ... @@ -822,6 +831,7 @@
822 831 ui->bulk_read = 0;
823 832 goto out_unlock;
824 833 }
  834 +
825 835 if (!ui->bulk_read) {
826 836 ui->read_in_a_row += 1;
827 837 if (ui->read_in_a_row < 3)
828 838  
... ... @@ -829,10 +839,35 @@
829 839 /* Three reads in a row, so switch on bulk-read */
830 840 ui->bulk_read = 1;
831 841 }
832   - ret = ubifs_do_bulk_read(c, page);
  842 +
  843 + /*
  844 + * If possible, try to use pre-allocated bulk-read information, which
  845 + * is protected by @c->bu_mutex.
  846 + */
  847 + if (mutex_trylock(&c->bu_mutex))
  848 + bu = &c->bu;
  849 + else {
  850 + bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
  851 + if (!bu)
  852 + goto out_unlock;
  853 +
  854 + bu->buf = NULL;
  855 + allocated = 1;
  856 + }
  857 +
  858 + bu->buf_len = c->max_bu_buf_len;
  859 + data_key_init(c, &bu->key, inode->i_ino,
  860 + page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
  861 + err = ubifs_do_bulk_read(c, bu, page);
  862 +
  863 + if (!allocated)
  864 + mutex_unlock(&c->bu_mutex);
  865 + else
  866 + kfree(bu);
  867 +
833 868 out_unlock:
834 869 mutex_unlock(&ui->ui_mutex);
835   - return ret;
  870 + return err;
836 871 }
837 872  
838 873 static int ubifs_readpage(struct file *file, struct page *page)
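One detail in the fs/ubifs/file.c hunks above implements "UBIFS: do not allocate too much": when the shared buffer is busy and a temporary bu_info is used instead, ubifs_do_bulk_read() no longer allocates the full c->max_bu_buf_len up front. It first looks up the keys, then sizes the buffer to exactly the span of data nodes it is about to read (the excerpt below repeats the relevant lines of the hunk for emphasis):

    if (allocate) {
            /*
             * Size the temporary buffer to the byte range actually covered by
             * the data nodes found by ubifs_tnc_get_bu_keys(), not to the
             * worst-case maximum.
             */
            bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
                          bu->zbranch[bu->cnt - 1].len -
                          bu->zbranch[0].offs;
            bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
            if (!bu->buf)
                    goto out_bu_off;        /* give up and turn bulk-read off */
    }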
fs/ubifs/journal.c
... ... @@ -690,8 +690,9 @@
690 690 int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR;
691 691 struct ubifs_inode *ui = ubifs_inode(inode);
692 692  
693   - dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key),
694   - key_block(c, key), len, DBGKEY(key));
  693 + dbg_jnl("ino %lu, blk %u, len %d, key %s",
  694 + (unsigned long)key_inum(c, key), key_block(c, key), len,
  695 + DBGKEY(key));
695 696 ubifs_assert(len <= UBIFS_BLOCK_SIZE);
696 697  
697 698 data = kmalloc(dlen, GFP_NOFS);
... ... @@ -1128,7 +1129,8 @@
1128 1129 ino_t inum = inode->i_ino;
1129 1130 unsigned int blk;
1130 1131  
1131   - dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size);
  1132 + dbg_jnl("ino %lu, size %lld -> %lld",
  1133 + (unsigned long)inum, old_size, new_size);
1132 1134 ubifs_assert(!ui->data_len);
1133 1135 ubifs_assert(S_ISREG(inode->i_mode));
1134 1136 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
fs/ubifs/key.h
... ... @@ -345,7 +345,7 @@
345 345 {
346 346 const union ubifs_key *key = k;
347 347  
348   - return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
  348 + return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
349 349 }
350 350  
351 351 /**
... ... @@ -416,7 +416,7 @@
416 416 {
417 417 const union ubifs_key *key = k;
418 418  
419   - return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK;
  419 + return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK;
420 420 }
421 421  
422 422 /**
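The two fs/ubifs/key.h hunks above belong to "UBIFS: endian handling fixes and annotations". The *_flash key accessors operate on keys in their on-media layout, so they must read the little-endian __le32 view of the key union (j32[]) rather than the host-endian u32[] view; with the right member, sparse can check that le32_to_cpu() is only ever applied to __le32 data. For reference, the second accessor as it looks after the fix (the function signature is inferred from context; the body is from the diff):

    static inline unsigned int key_block_flash(const struct ubifs_info *c,
                                               const void *k)
    {
            const union ubifs_key *key = k;

            /* j32[] is the __le32 (on-flash) view of the key */
            return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK;
    }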
fs/ubifs/lpt_commit.c
... ... @@ -571,8 +571,6 @@
571 571 /* We assume here that LEB zero is never an LPT LEB */
572 572 if (nnode->nbranch[iip].lnum)
573 573 return ubifs_get_pnode(c, nnode, iip);
574   - else
575   - return NULL;
576 574 }
577 575  
578 576 /* Go up while can't go right */
fs/ubifs/orphan.c
... ... @@ -105,7 +105,7 @@
105 105 list_add_tail(&orphan->list, &c->orph_list);
106 106 list_add_tail(&orphan->new_list, &c->orph_new);
107 107 spin_unlock(&c->orphan_lock);
108   - dbg_gen("ino %lu", inum);
  108 + dbg_gen("ino %lu", (unsigned long)inum);
109 109 return 0;
110 110 }
111 111  
112 112  
... ... @@ -132,14 +132,16 @@
132 132 else {
133 133 if (o->dnext) {
134 134 spin_unlock(&c->orphan_lock);
135   - dbg_gen("deleted twice ino %lu", inum);
  135 + dbg_gen("deleted twice ino %lu",
  136 + (unsigned long)inum);
136 137 return;
137 138 }
138 139 if (o->cnext) {
139 140 o->dnext = c->orph_dnext;
140 141 c->orph_dnext = o;
141 142 spin_unlock(&c->orphan_lock);
142   - dbg_gen("delete later ino %lu", inum);
  143 + dbg_gen("delete later ino %lu",
  144 + (unsigned long)inum);
143 145 return;
144 146 }
145 147 rb_erase(p, &c->orph_tree);
146 148  
... ... @@ -151,12 +153,12 @@
151 153 }
152 154 spin_unlock(&c->orphan_lock);
153 155 kfree(o);
154   - dbg_gen("inum %lu", inum);
  156 + dbg_gen("inum %lu", (unsigned long)inum);
155 157 return;
156 158 }
157 159 }
158 160 spin_unlock(&c->orphan_lock);
159   - dbg_err("missing orphan ino %lu", inum);
  161 + dbg_err("missing orphan ino %lu", (unsigned long)inum);
160 162 dbg_dump_stack();
161 163 }
162 164  
... ... @@ -448,7 +450,7 @@
448 450 rb_erase(&orphan->rb, &c->orph_tree);
449 451 list_del(&orphan->list);
450 452 c->tot_orphans -= 1;
451   - dbg_gen("deleting orphan ino %lu", orphan->inum);
  453 + dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum);
452 454 kfree(orphan);
453 455 }
454 456 c->orph_dnext = NULL;
... ... @@ -536,8 +538,8 @@
536 538 list_add_tail(&orphan->list, &c->orph_list);
537 539 orphan->dnext = c->orph_dnext;
538 540 c->orph_dnext = orphan;
539   - dbg_mnt("ino %lu, new %d, tot %d",
540   - inum, c->new_orphans, c->tot_orphans);
  541 + dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
  542 + c->new_orphans, c->tot_orphans);
541 543 return 0;
542 544 }
543 545  
... ... @@ -609,7 +611,8 @@
609 611 n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
610 612 for (i = 0; i < n; i++) {
611 613 inum = le64_to_cpu(orph->inos[i]);
612   - dbg_rcvry("deleting orphaned inode %lu", inum);
  614 + dbg_rcvry("deleting orphaned inode %lu",
  615 + (unsigned long)inum);
613 616 err = ubifs_tnc_remove_ino(c, inum);
614 617 if (err)
615 618 return err;
... ... @@ -840,8 +843,8 @@
840 843 if (inum != ci->last_ino) {
841 844 /* Lowest node type is the inode node, so it comes first */
842 845 if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
843   - ubifs_err("found orphan node ino %lu, type %d", inum,
844   - key_type(c, &zbr->key));
  846 + ubifs_err("found orphan node ino %lu, type %d",
  847 + (unsigned long)inum, key_type(c, &zbr->key));
845 848 ci->last_ino = inum;
846 849 ci->tot_inos += 1;
847 850 err = ubifs_tnc_read_node(c, zbr, ci->node);
... ... @@ -853,7 +856,8 @@
853 856 /* Must be recorded as an orphan */
854 857 if (!dbg_find_check_orphan(&ci->root, inum) &&
855 858 !dbg_find_orphan(c, inum)) {
856   - ubifs_err("missing orphan, ino %lu", inum);
  859 + ubifs_err("missing orphan, ino %lu",
  860 + (unsigned long)inum);
857 861 ci->missing += 1;
858 862 }
859 863 }
fs/ubifs/recovery.c
... ... @@ -168,12 +168,12 @@
168 168 struct ubifs_mst_node *mst)
169 169 {
170 170 int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
171   - uint32_t save_flags;
  171 + __le32 save_flags;
172 172  
173 173 dbg_rcvry("recovery");
174 174  
175 175 save_flags = mst->flags;
176   - mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY);
  176 + mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
177 177  
178 178 ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
179 179 err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
180 180  
... ... @@ -1435,13 +1435,13 @@
1435 1435 err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
1436 1436 if (err)
1437 1437 goto out;
1438   - dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs,
1439   - i_size, e->d_size);
  1438 + dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ",
  1439 + (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
1440 1440 return 0;
1441 1441  
1442 1442 out:
1443 1443 ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
1444   - e->inum, e->i_size, e->d_size, err);
  1444 + (unsigned long)e->inum, e->i_size, e->d_size, err);
1445 1445 return err;
1446 1446 }
1447 1447  
... ... @@ -1472,7 +1472,8 @@
1472 1472 return err;
1473 1473 if (err == -ENOENT) {
1474 1474 /* Remove data nodes that have no inode */
1475   - dbg_rcvry("removing ino %lu", e->inum);
  1475 + dbg_rcvry("removing ino %lu",
  1476 + (unsigned long)e->inum);
1476 1477 err = ubifs_tnc_remove_ino(c, e->inum);
1477 1478 if (err)
1478 1479 return err;
... ... @@ -1493,8 +1494,8 @@
1493 1494 return PTR_ERR(inode);
1494 1495 if (inode->i_size < e->d_size) {
1495 1496 dbg_rcvry("ino %lu size %lld -> %lld",
1496   - e->inum, e->d_size,
1497   - inode->i_size);
  1497 + (unsigned long)e->inum,
  1498 + e->d_size, inode->i_size);
1498 1499 inode->i_size = e->d_size;
1499 1500 ubifs_inode(inode)->ui_size = e->d_size;
1500 1501 e->inode = inode;
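The save_flags change in the fs/ubifs/recovery.c hunk above is the other half of the endianness cleanup: a field that lives on the media keeps its __le32 type in C as well, and a flag is set by OR-ing in a cpu_to_le32() constant instead of converting the whole field to host order and back. Both forms side by side (taken from that hunk; they set the same bit, the second just keeps the annotations consistent and lets the conversion of the constant fold at compile time):

    __le32 save_flags = mst->flags;         /* keep the on-media (__le32) type */

    /* old: round-trip through host byte order to set one bit */
    mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY);

    /* new: OR in an already-converted constant */
    mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);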
fs/ubifs/replay.c
... ... @@ -1065,7 +1065,7 @@
1065 1065 ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
1066 1066 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
1067 1067 "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
1068   - c->highest_inum);
  1068 + (unsigned long)c->highest_inum);
1069 1069 out:
1070 1070 destroy_replay_tree(c);
1071 1071 destroy_bud_list(c);
fs/ubifs/sb.c
... ... @@ -81,6 +81,7 @@
81 81 int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
82 82 int min_leb_cnt = UBIFS_MIN_LEB_CNT;
83 83 uint64_t tmp64, main_bytes;
  84 + __le64 tmp_le64;
84 85  
85 86 /* Some functions called from here depend on the @c->key_len filed */
86 87 c->key_len = UBIFS_SK_LEN;
... ... @@ -295,10 +296,10 @@
295 296 ino->ch.node_type = UBIFS_INO_NODE;
296 297 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
297 298 ino->nlink = cpu_to_le32(2);
298   - tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
299   - ino->atime_sec = tmp;
300   - ino->ctime_sec = tmp;
301   - ino->mtime_sec = tmp;
  299 + tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
  300 + ino->atime_sec = tmp_le64;
  301 + ino->ctime_sec = tmp_le64;
  302 + ino->mtime_sec = tmp_le64;
302 303 ino->atime_nsec = 0;
303 304 ino->ctime_nsec = 0;
304 305 ino->mtime_nsec = 0;
fs/ubifs/super.c
... ... @@ -36,6 +36,12 @@
36 36 #include <linux/mount.h>
37 37 #include "ubifs.h"
38 38  
  39 +/*
  40 + * Maximum amount of memory we may 'kmalloc()' without worrying that we are
  41 + * allocating too much.
  42 + */
  43 +#define UBIFS_KMALLOC_OK (128*1024)
  44 +
39 45 /* Slab cache for UBIFS inodes */
40 46 struct kmem_cache *ubifs_inode_slab;
41 47  
42 48  
... ... @@ -561,18 +567,11 @@
561 567 * calculations when reporting free space.
562 568 */
563 569 c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
564   - /* Buffer size for bulk-reads */
565   - c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
566   - if (c->bulk_read_buf_size > c->leb_size)
567   - c->bulk_read_buf_size = c->leb_size;
568   - if (c->bulk_read_buf_size > 128 * 1024) {
569   - /* Check if we can kmalloc more than 128KiB */
570   - void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
571 570  
572   - kfree(try);
573   - if (!try)
574   - c->bulk_read_buf_size = 128 * 1024;
575   - }
  571 + /* Buffer size for bulk-reads */
  572 + c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
  573 + if (c->max_bu_buf_len > c->leb_size)
  574 + c->max_bu_buf_len = c->leb_size;
576 575 return 0;
577 576 }
578 577  
... ... @@ -992,6 +991,34 @@
992 991 }
993 992  
994 993 /**
  994 + * bu_init - initialize bulk-read information.
  995 + * @c: UBIFS file-system description object
  996 + */
  997 +static void bu_init(struct ubifs_info *c)
  998 +{
  999 + ubifs_assert(c->bulk_read == 1);
  1000 +
  1001 + if (c->bu.buf)
  1002 + return; /* Already initialized */
  1003 +
  1004 +again:
  1005 + c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
  1006 + if (!c->bu.buf) {
  1007 + if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
  1008 + c->max_bu_buf_len = UBIFS_KMALLOC_OK;
  1009 + goto again;
  1010 + }
  1011 +
  1012 + /* Just disable bulk-read */
  1013 + ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
  1014 + "disabling it", c->max_bu_buf_len);
  1015 + c->mount_opts.bulk_read = 1;
  1016 + c->bulk_read = 0;
  1017 + return;
  1018 + }
  1019 +}
  1020 +
  1021 +/**
995 1022 * mount_ubifs - mount UBIFS file-system.
996 1023 * @c: UBIFS file-system description object
997 1024 *
... ... @@ -1059,6 +1086,13 @@
1059 1086 goto out_free;
1060 1087 }
1061 1088  
  1089 + if (c->bulk_read == 1)
  1090 + bu_init(c);
  1091 +
  1092 + /*
  1093 + * We have to check all CRCs, even for data nodes, when we mount the FS
  1094 + * (specifically, when we are replaying).
  1095 + */
1062 1096 c->always_chk_crc = 1;
1063 1097  
1064 1098 err = ubifs_read_superblock(c);
... ... @@ -1289,6 +1323,7 @@
1289 1323 out_dereg:
1290 1324 dbg_failure_mode_deregistration(c);
1291 1325 out_free:
  1326 + kfree(c->bu.buf);
1292 1327 vfree(c->ileb_buf);
1293 1328 vfree(c->sbuf);
1294 1329 kfree(c->bottom_up_buf);
1295 1330  
... ... @@ -1325,10 +1360,11 @@
1325 1360 kfree(c->cbuf);
1326 1361 kfree(c->rcvrd_mst_node);
1327 1362 kfree(c->mst_node);
  1363 + kfree(c->bu.buf);
  1364 + vfree(c->ileb_buf);
1328 1365 vfree(c->sbuf);
1329 1366 kfree(c->bottom_up_buf);
1330 1367 UBIFS_DBG(vfree(c->dbg_buf));
1331   - vfree(c->ileb_buf);
1332 1368 dbg_failure_mode_deregistration(c);
1333 1369 }
1334 1370  
... ... @@ -1626,6 +1662,7 @@
1626 1662 ubifs_err("invalid or unknown remount parameter");
1627 1663 return err;
1628 1664 }
  1665 +
1629 1666 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
1630 1667 err = ubifs_remount_rw(c);
1631 1668 if (err)
... ... @@ -1633,6 +1670,14 @@
1633 1670 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
1634 1671 ubifs_remount_ro(c);
1635 1672  
  1673 + if (c->bulk_read == 1)
  1674 + bu_init(c);
  1675 + else {
  1676 + dbg_gen("disable bulk-read");
  1677 + kfree(c->bu.buf);
  1678 + c->bu.buf = NULL;
  1679 + }
  1680 +
1636 1681 return 0;
1637 1682 }
1638 1683  
... ... @@ -1723,6 +1768,7 @@
1723 1768 mutex_init(&c->log_mutex);
1724 1769 mutex_init(&c->mst_mutex);
1725 1770 mutex_init(&c->umount_mutex);
  1771 + mutex_init(&c->bu_mutex);
1726 1772 init_waitqueue_head(&c->cmt_wq);
1727 1773 c->buds = RB_ROOT;
1728 1774 c->old_idx = RB_ROOT;
fs/ubifs/tnc.c
... ... @@ -1501,7 +1501,12 @@
1501 1501 * @bu: bulk-read parameters and results
1502 1502 *
1503 1503 * Lookup consecutive data node keys for the same inode that reside
1504   - * consecutively in the same LEB.
  1504 + * consecutively in the same LEB. This function returns zero in case of success
  1505 + * and a negative error code in case of failure.
  1506 + *
  1507 + * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
  1508 + * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
  1509 + * maximum possible amount of nodes for bulk-read.
1505 1510 */
1506 1511 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
1507 1512 {
... ... @@ -2677,7 +2682,7 @@
2677 2682 struct ubifs_dent_node *xent, *pxent = NULL;
2678 2683 struct qstr nm = { .name = NULL };
2679 2684  
2680   - dbg_tnc("ino %lu", inum);
  2685 + dbg_tnc("ino %lu", (unsigned long)inum);
2681 2686  
2682 2687 /*
2683 2688 * Walk all extended attribute entries and remove them together with
... ... @@ -2697,7 +2702,8 @@
2697 2702 }
2698 2703  
2699 2704 xattr_inum = le64_to_cpu(xent->inum);
2700   - dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum);
  2705 + dbg_tnc("xent '%s', ino %lu", xent->name,
  2706 + (unsigned long)xattr_inum);
2701 2707  
2702 2708 nm.name = xent->name;
2703 2709 nm.len = le16_to_cpu(xent->nlen);
fs/ubifs/ubifs.h
... ... @@ -753,7 +753,7 @@
753 753 };
754 754  
755 755 /**
756   - * struct bu_info - bulk-read information
  756 + * struct bu_info - bulk-read information.
757 757 * @key: first data node key
758 758 * @zbranch: zbranches of data nodes to bulk read
759 759 * @buf: buffer to read into
760 760  
... ... @@ -969,8 +969,11 @@
969 969 * @mst_node: master node
970 970 * @mst_offs: offset of valid master node
971 971 * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
972   - * @bulk_read_buf_size: buffer size for bulk-reads
973 972 *
  973 + * @max_bu_buf_len: maximum bulk-read buffer length
  974 + * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
  975 + * @bu: pre-allocated bulk-read information
  976 + *
974 977 * @log_lebs: number of logical eraseblocks in the log
975 978 * @log_bytes: log size in bytes
976 979 * @log_last: last LEB of the log
... ... @@ -1217,7 +1220,10 @@
1217 1220 struct ubifs_mst_node *mst_node;
1218 1221 int mst_offs;
1219 1222 struct mutex mst_mutex;
1220   - int bulk_read_buf_size;
  1223 +
  1224 + int max_bu_buf_len;
  1225 + struct mutex bu_mutex;
  1226 + struct bu_info bu;
1221 1227  
1222 1228 int log_lebs;
1223 1229 long long log_bytes;