Commit 7d2f280e75f05919314e250cadf361a327ed555c

Authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6: (24 commits)
  quota: Fix possible oops in __dquot_initialize()
  ext3: Update kernel-doc comments
  jbd/2: fixed typos
  ext2: fixed typo.
  ext3: Fix debug messages in ext3_group_extend()
  jbd: Convert atomic_inc() to get_bh()
  ext3: Remove misplaced BUFFER_TRACE() in ext3_truncate()
  jbd: Fix debug message in do_get_write_access()
  jbd: Check return value of __getblk()
  ext3: Use DIV_ROUND_UP() on group desc block counting
  ext3: Return proper error code on ext3_fill_super()
  ext3: Remove unnecessary casts on bh->b_data
  ext3: Cleanup ext3_setup_super()
  quota: Fix issuing of warnings from dquot_transfer
  quota: fix dquot_disable vs dquot_transfer race v2
  jbd: Convert bitops to buffer fns
  ext3/jbd: Avoid WARN() messages when failing to write the superblock
  jbd: Use offset_in_page() instead of manual calculation
  jbd: Remove unnecessary goto statement
  jbd: Use printk_ratelimited() in journal_alloc_journal_head()
  ...

Showing 15 changed files (side-by-side diff)

... ... @@ -646,10 +646,9 @@
646 646 return here;
647 647 }
648 648  
649   -/*
  649 +/**
650 650 * ext2_try_to_allocate()
651 651 * @sb: superblock
652   - * @handle: handle to this transaction
653 652 * @group: given allocation block group
654 653 * @bitmap_bh: bufferhead holds the block bitmap
655 654 * @grp_goal: given target block within the group
... ... @@ -792,9 +792,9 @@
792 792 if (here < 0)
793 793 here = 0;
794 794  
795   - p = ((char *)bh->b_data) + (here >> 3);
  795 + p = bh->b_data + (here >> 3);
796 796 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
797   - next = (r - ((char *)bh->b_data)) << 3;
  797 + next = (r - bh->b_data) << 3;
798 798  
799 799 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
800 800 return next;
801 801  
... ... @@ -810,8 +810,9 @@
810 810  
811 811 /**
812 812 * claim_block()
  813 + * @lock: the spin lock for this block group
813 814 * @block: the free block (group relative) to allocate
814   - * @bh: the bufferhead containts the block group bitmap
  815 + * @bh: the buffer_head contains the block group bitmap
815 816 *
816 817 * We think we can allocate this block in this bitmap. Try to set the bit.
817 818 * If that succeeds then check that nobody has allocated and then freed the
818 819  
... ... @@ -956,9 +957,11 @@
956 957 * but we will shift to the place where start_block is,
957 958 * then start from there, when looking for a reservable space.
958 959 *
959   - * @size: the target new reservation window size
  960 + * @my_rsv: the reservation window
960 961 *
961   - * @group_first_block: the first block we consider to start
  962 + * @sb: the super block
  963 + *
  964 + * @start_block: the first block we consider to start
962 965 * the real search from
963 966 *
964 967 * @last_block:
... ... @@ -1084,7 +1087,7 @@
1084 1087 *
1085 1088 * failed: we failed to find a reservation window in this group
1086 1089 *
1087   - * @rsv: the reservation
  1090 + * @my_rsv: the reservation window
1088 1091 *
1089 1092 * @grp_goal: The goal (group-relative). It is where the search for a
1090 1093 * free reservable space should start from.
1091 1094  
... ... @@ -1273,8 +1276,8 @@
1273 1276 * @group: given allocation block group
1274 1277 * @bitmap_bh: bufferhead holds the block bitmap
1275 1278 * @grp_goal: given target block within the group
1276   - * @count: target number of blocks to allocate
1277 1279 * @my_rsv: reservation window
  1280 + * @count: target number of blocks to allocate
1278 1281 * @errp: pointer to store the error code
1279 1282 *
1280 1283 * This is the main function used to allocate a new block and its reservation
... ... @@ -570,9 +570,14 @@
570 570 ei->i_state_flags = 0;
571 571 ext3_set_inode_state(inode, EXT3_STATE_NEW);
572 572  
573   - ei->i_extra_isize =
574   - (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
575   - sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
  573 + /* See comment in ext3_iget for explanation */
  574 + if (ino >= EXT3_FIRST_INO(sb) + 1 &&
  575 + EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
  576 + ei->i_extra_isize =
  577 + sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
  578 + } else {
  579 + ei->i_extra_isize = 0;
  580 + }
576 581  
577 582 ret = inode;
578 583 dquot_initialize(inode);
... ... @@ -498,7 +498,7 @@
498 498 }
499 499  
500 500 /**
501   - * ext3_blks_to_allocate: Look up the block map and count the number
  501 + * ext3_blks_to_allocate - Look up the block map and count the number
502 502 * of direct blocks need to be allocated for the given branch.
503 503 *
504 504 * @branch: chain of indirect blocks
505 505  
506 506  
... ... @@ -536,14 +536,18 @@
536 536 }
537 537  
538 538 /**
539   - * ext3_alloc_blocks: multiple allocate blocks needed for a branch
  539 + * ext3_alloc_blocks - multiple allocate blocks needed for a branch
  540 + * @handle: handle for this transaction
  541 + * @inode: owner
  542 + * @goal: preferred place for allocation
540 543 * @indirect_blks: the number of blocks need to allocate for indirect
541 544 * blocks
542   - *
  545 + * @blks: number of blocks need to allocated for direct blocks
543 546 * @new_blocks: on return it will store the new block numbers for
544 547 * the indirect blocks(if needed) and the first direct block,
545   - * @blks: on return it will store the total number of allocated
546   - * direct blocks
  548 + * @err: here we store the error value
  549 + *
  550 + * return the number of direct blocks allocated
547 551 */
548 552 static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
549 553 ext3_fsblk_t goal, int indirect_blks, int blks,
550 554  
... ... @@ -598,9 +602,11 @@
598 602  
599 603 /**
600 604 * ext3_alloc_branch - allocate and set up a chain of blocks.
  605 + * @handle: handle for this transaction
601 606 * @inode: owner
602 607 * @indirect_blks: number of allocated indirect blocks
603 608 * @blks: number of allocated direct blocks
  609 + * @goal: preferred place for allocation
604 610 * @offsets: offsets (in the blocks) to store the pointers to next.
605 611 * @branch: place to store the chain in.
606 612 *
607 613  
... ... @@ -700,10 +706,9 @@
700 706  
701 707 /**
702 708 * ext3_splice_branch - splice the allocated branch onto inode.
  709 + * @handle: handle for this transaction
703 710 * @inode: owner
704 711 * @block: (logical) number of block we are adding
705   - * @chain: chain of indirect blocks (with a missing link - see
706   - * ext3_alloc_branch)
707 712 * @where: location of missing link
708 713 * @num: number of indirect blocks we are adding
709 714 * @blks: number of direct blocks we are adding
... ... @@ -2530,7 +2535,6 @@
2530 2535 */
2531 2536 } else {
2532 2537 /* Shared branch grows from an indirect block */
2533   - BUFFER_TRACE(partial->bh, "get_write_access");
2534 2538 ext3_free_branches(handle, inode, partial->bh,
2535 2539 partial->p,
2536 2540 partial->p+1, (chain+n-1) - partial);
... ... @@ -977,7 +977,8 @@
977 977 o_blocks_count = le32_to_cpu(es->s_blocks_count);
978 978  
979 979 if (test_opt(sb, DEBUG))
980   - printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" uto "E3FSBLK" blocks\n",
  980 + printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
  981 + " upto "E3FSBLK" blocks\n",
981 982 o_blocks_count, n_blocks_count);
982 983  
983 984 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
... ... @@ -985,7 +986,7 @@
985 986  
986 987 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
987 988 printk(KERN_ERR "EXT3-fs: filesystem on %s:"
988   - " too large to resize to %lu blocks safely\n",
  989 + " too large to resize to "E3FSBLK" blocks safely\n",
989 990 sb->s_id, n_blocks_count);
990 991 if (sizeof(sector_t) < 8)
991 992 ext3_warning(sb, __func__,
992 993  
... ... @@ -1065,11 +1066,11 @@
1065 1066 es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
1066 1067 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
1067 1068 mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
1068   - ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
1069   - o_blocks_count + add);
  1069 + ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
  1070 + o_blocks_count, o_blocks_count + add);
1070 1071 ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
1071   - ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count,
1072   - o_blocks_count + add);
  1072 + ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
  1073 + o_blocks_count, o_blocks_count + add);
1073 1074 if ((err = ext3_journal_stop(handle)))
1074 1075 goto exit_put;
1075 1076 if (test_opt(sb, DEBUG))
... ... @@ -1301,9 +1301,9 @@
1301 1301 ext3_msg(sb, KERN_WARNING,
1302 1302 "warning: mounting fs with errors, "
1303 1303 "running e2fsck is recommended");
1304   - else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
  1304 + else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
1305 1305 le16_to_cpu(es->s_mnt_count) >=
1306   - (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
  1306 + le16_to_cpu(es->s_max_mnt_count))
1307 1307 ext3_msg(sb, KERN_WARNING,
1308 1308 "warning: maximal mount count reached, "
1309 1309 "running e2fsck is recommended");
... ... @@ -1320,7 +1320,7 @@
1320 1320 valid forever! :) */
1321 1321 es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
1322 1322 #endif
1323   - if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
  1323 + if (!le16_to_cpu(es->s_max_mnt_count))
1324 1324 es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
1325 1325 le16_add_cpu(&es->s_mnt_count, 1);
1326 1326 es->s_mtime = cpu_to_le32(get_seconds());
... ... @@ -1647,7 +1647,7 @@
1647 1647 * Note: s_es must be initialized as soon as possible because
1648 1648 * some ext3 macro-instructions depend on its value
1649 1649 */
1650   - es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
  1650 + es = (struct ext3_super_block *) (bh->b_data + offset);
1651 1651 sbi->s_es = es;
1652 1652 sb->s_magic = le16_to_cpu(es->s_magic);
1653 1653 if (sb->s_magic != EXT3_SUPER_MAGIC)
... ... @@ -1758,7 +1758,7 @@
1758 1758 "error: can't read superblock on 2nd try");
1759 1759 goto failed_mount;
1760 1760 }
1761   - es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
  1761 + es = (struct ext3_super_block *)(bh->b_data + offset);
1762 1762 sbi->s_es = es;
1763 1763 if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
1764 1764 ext3_msg(sb, KERN_ERR,
1765 1765  
... ... @@ -1857,13 +1857,13 @@
1857 1857 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
1858 1858 le32_to_cpu(es->s_first_data_block) - 1)
1859 1859 / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
1860   - db_count = (sbi->s_groups_count + EXT3_DESC_PER_BLOCK(sb) - 1) /
1861   - EXT3_DESC_PER_BLOCK(sb);
  1860 + db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
1862 1861 sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
1863 1862 GFP_KERNEL);
1864 1863 if (sbi->s_group_desc == NULL) {
1865 1864 ext3_msg(sb, KERN_ERR,
1866 1865 "error: not enough memory");
  1866 + ret = -ENOMEM;
1867 1867 goto failed_mount;
1868 1868 }
1869 1869  
... ... @@ -1951,6 +1951,7 @@
1951 1951 }
1952 1952 if (err) {
1953 1953 ext3_msg(sb, KERN_ERR, "error: insufficient memory");
  1954 + ret = err;
1954 1955 goto failed_mount3;
1955 1956 }
1956 1957  
... ... @@ -2159,7 +2160,7 @@
2159 2160 goto out_bdev;
2160 2161 }
2161 2162  
2162   - es = (struct ext3_super_block *) (((char *)bh->b_data) + offset);
  2163 + es = (struct ext3_super_block *) (bh->b_data + offset);
2163 2164 if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
2164 2165 !(le32_to_cpu(es->s_feature_incompat) &
2165 2166 EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
... ... @@ -2352,6 +2353,21 @@
2352 2353  
2353 2354 if (!sbh)
2354 2355 return error;
  2356 +
  2357 + if (buffer_write_io_error(sbh)) {
  2358 + /*
  2359 + * Oh, dear. A previous attempt to write the
  2360 + * superblock failed. This could happen because the
  2361 + * USB device was yanked out. Or it could happen to
  2362 + * be a transient write error and maybe the block will
  2363 + * be remapped. Nothing we can do but to retry the
  2364 + * write and hope for the best.
  2365 + */
  2366 + ext3_msg(sb, KERN_ERR, "previous I/O error to "
  2367 + "superblock detected");
  2368 + clear_buffer_write_io_error(sbh);
  2369 + set_buffer_uptodate(sbh);
  2370 + }
2355 2371 /*
2356 2372 * If the file system is mounted read-only, don't update the
2357 2373 * superblock write time. This avoids updating the superblock
2358 2374  
... ... @@ -2368,8 +2384,15 @@
2368 2384 es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
2369 2385 BUFFER_TRACE(sbh, "marking dirty");
2370 2386 mark_buffer_dirty(sbh);
2371   - if (sync)
  2387 + if (sync) {
2372 2388 error = sync_dirty_buffer(sbh);
  2389 + if (buffer_write_io_error(sbh)) {
  2390 + ext3_msg(sb, KERN_ERR, "I/O error while writing "
  2391 + "superblock");
  2392 + clear_buffer_write_io_error(sbh);
  2393 + set_buffer_uptodate(sbh);
  2394 + }
  2395 + }
2373 2396 return error;
2374 2397 }
2375 2398  
... ... @@ -221,7 +221,7 @@
221 221 goto restart;
222 222 }
223 223 if (buffer_locked(bh)) {
224   - atomic_inc(&bh->b_count);
  224 + get_bh(bh);
225 225 spin_unlock(&journal->j_list_lock);
226 226 jbd_unlock_bh_state(bh);
227 227 wait_on_buffer(bh);
... ... @@ -283,7 +283,7 @@
283 283 int ret = 0;
284 284  
285 285 if (buffer_locked(bh)) {
286   - atomic_inc(&bh->b_count);
  286 + get_bh(bh);
287 287 spin_unlock(&journal->j_list_lock);
288 288 jbd_unlock_bh_state(bh);
289 289 wait_on_buffer(bh);
... ... @@ -587,13 +587,13 @@
587 587 /* Bump b_count to prevent truncate from stumbling over
588 588 the shadowed buffer! @@@ This can go if we ever get
589 589 rid of the BJ_IO/BJ_Shadow pairing of buffers. */
590   - atomic_inc(&jh2bh(jh)->b_count);
  590 + get_bh(jh2bh(jh));
591 591  
592 592 /* Make a temporary IO buffer with which to write it out
593 593 (this will requeue both the metadata buffer and the
594 594 temporary IO buffer). new_bh goes on BJ_IO*/
595 595  
596   - set_bit(BH_JWrite, &jh2bh(jh)->b_state);
  596 + set_buffer_jwrite(jh2bh(jh));
597 597 /*
598 598 * akpm: journal_write_metadata_buffer() sets
599 599 * new_bh->b_transaction to commit_transaction.
... ... @@ -603,7 +603,7 @@
603 603 JBUFFER_TRACE(jh, "ph3: write metadata");
604 604 flags = journal_write_metadata_buffer(commit_transaction,
605 605 jh, &new_jh, blocknr);
606   - set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
  606 + set_buffer_jwrite(jh2bh(new_jh));
607 607 wbuf[bufs++] = jh2bh(new_jh);
608 608  
609 609 /* Record the new block's tag in the current descriptor
... ... @@ -713,7 +713,7 @@
713 713 shadowed buffer */
714 714 jh = commit_transaction->t_shadow_list->b_tprev;
715 715 bh = jh2bh(jh);
716   - clear_bit(BH_JWrite, &bh->b_state);
  716 + clear_buffer_jwrite(bh);
717 717 J_ASSERT_BH(bh, buffer_jbddirty(bh));
718 718  
719 719 /* The metadata is now released for reuse, but we need
... ... @@ -36,6 +36,7 @@
36 36 #include <linux/poison.h>
37 37 #include <linux/proc_fs.h>
38 38 #include <linux/debugfs.h>
  39 +#include <linux/ratelimit.h>
39 40  
40 41 #include <asm/uaccess.h>
41 42 #include <asm/page.h>
... ... @@ -84,6 +85,7 @@
84 85  
85 86 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
86 87 static void __journal_abort_soft (journal_t *journal, int errno);
  88 +static const char *journal_dev_name(journal_t *journal, char *buffer);
87 89  
88 90 /*
89 91 * Helper function used to manage commit timeouts
... ... @@ -439,7 +441,7 @@
439 441 */
440 442 if (!tid_geq(journal->j_commit_request, target)) {
441 443 /*
442   - * We want a new commit: OK, mark the request and wakup the
  444 + * We want a new commit: OK, mark the request and wakeup the
443 445 * commit thread. We do _not_ do the commit ourselves.
444 446 */
445 447  
... ... @@ -950,6 +952,8 @@
950 952 if (err)
951 953 return err;
952 954 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
  955 + if (unlikely(!bh))
  956 + return -ENOMEM;
953 957 lock_buffer(bh);
954 958 memset (bh->b_data, 0, journal->j_blocksize);
955 959 BUFFER_TRACE(bh, "marking dirty");
... ... @@ -1010,6 +1014,23 @@
1010 1014 goto out;
1011 1015 }
1012 1016  
  1017 + if (buffer_write_io_error(bh)) {
  1018 + char b[BDEVNAME_SIZE];
  1019 + /*
  1020 + * Oh, dear. A previous attempt to write the journal
  1021 + * superblock failed. This could happen because the
  1022 + * USB device was yanked out. Or it could happen to
  1023 + * be a transient write error and maybe the block will
  1024 + * be remapped. Nothing we can do but to retry the
  1025 + * write and hope for the best.
  1026 + */
  1027 + printk(KERN_ERR "JBD: previous I/O error detected "
  1028 + "for journal superblock update for %s.\n",
  1029 + journal_dev_name(journal, b));
  1030 + clear_buffer_write_io_error(bh);
  1031 + set_buffer_uptodate(bh);
  1032 + }
  1033 +
1013 1034 spin_lock(&journal->j_state_lock);
1014 1035 jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
1015 1036 journal->j_tail, journal->j_tail_sequence, journal->j_errno);
1016 1037  
... ... @@ -1021,9 +1042,17 @@
1021 1042  
1022 1043 BUFFER_TRACE(bh, "marking dirty");
1023 1044 mark_buffer_dirty(bh);
1024   - if (wait)
  1045 + if (wait) {
1025 1046 sync_dirty_buffer(bh);
1026   - else
  1047 + if (buffer_write_io_error(bh)) {
  1048 + char b[BDEVNAME_SIZE];
  1049 + printk(KERN_ERR "JBD: I/O error detected "
  1050 + "when updating journal superblock for %s.\n",
  1051 + journal_dev_name(journal, b));
  1052 + clear_buffer_write_io_error(bh);
  1053 + set_buffer_uptodate(bh);
  1054 + }
  1055 + } else
1027 1056 write_dirty_buffer(bh, WRITE);
1028 1057  
1029 1058 out:
... ... @@ -1719,7 +1748,6 @@
1719 1748 static struct journal_head *journal_alloc_journal_head(void)
1720 1749 {
1721 1750 struct journal_head *ret;
1722   - static unsigned long last_warning;
1723 1751  
1724 1752 #ifdef CONFIG_JBD_DEBUG
1725 1753 atomic_inc(&nr_journal_heads);
... ... @@ -1727,11 +1755,9 @@
1727 1755 ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
1728 1756 if (ret == NULL) {
1729 1757 jbd_debug(1, "out of memory for journal_head\n");
1730   - if (time_after(jiffies, last_warning + 5*HZ)) {
1731   - printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
1732   - __func__);
1733   - last_warning = jiffies;
1734   - }
  1758 + printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
  1759 + __func__);
  1760 +
1735 1761 while (ret == NULL) {
1736 1762 yield();
1737 1763 ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
... ... @@ -296,10 +296,10 @@
296 296 #ifdef CONFIG_JBD_DEBUG
297 297 int dropped = info.end_transaction -
298 298 be32_to_cpu(journal->j_superblock->s_sequence);
299   -#endif
300 299 jbd_debug(1,
301 300 "JBD: ignoring %d transaction%s from the journal.\n",
302 301 dropped, (dropped == 1) ? "" : "s");
  302 +#endif
303 303 journal->j_transaction_sequence = ++info.end_transaction;
304 304 }
305 305  
fs/jbd/transaction.c
... ... @@ -293,9 +293,7 @@
293 293 jbd_free_handle(handle);
294 294 current->journal_info = NULL;
295 295 handle = ERR_PTR(err);
296   - goto out;
297 296 }
298   -out:
299 297 return handle;
300 298 }
301 299  
... ... @@ -528,7 +526,7 @@
528 526 transaction = handle->h_transaction;
529 527 journal = transaction->t_journal;
530 528  
531   - jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
  529 + jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
532 530  
533 531 JBUFFER_TRACE(jh, "entry");
534 532 repeat:
... ... @@ -713,7 +711,7 @@
713 711 J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
714 712 "Possible IO failure.\n");
715 713 page = jh2bh(jh)->b_page;
716   - offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
  714 + offset = offset_in_page(jh2bh(jh)->b_data);
717 715 source = kmap_atomic(page, KM_USER0);
718 716 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
719 717 kunmap_atomic(source, KM_USER0);
... ... @@ -478,7 +478,7 @@
478 478 */
479 479 if (!tid_geq(journal->j_commit_request, target)) {
480 480 /*
481   - * We want a new commit: OK, mark the request and wakup the
  481 + * We want a new commit: OK, mark the request and wakeup the
482 482 * commit thread. We do _not_ do the commit ourselves.
483 483 */
484 484  
... ... @@ -4,6 +4,7 @@
4 4  
5 5 config QUOTA
6 6 bool "Quota support"
  7 + select QUOTACTL
7 8 help
8 9 If you say Y here, you will be able to set per user limits for disk
9 10 usage (also called disk quotas). Currently, it works for the
... ... @@ -65,8 +66,7 @@
65 66  
66 67 config QUOTACTL
67 68 bool
68   - depends on XFS_QUOTA || QUOTA
69   - default y
  69 + default n
70 70  
71 71 config QUOTACTL_COMPAT
72 72 bool
... ... @@ -1386,6 +1386,9 @@
1386 1386 /* Avoid races with quotaoff() */
1387 1387 if (!sb_has_quota_active(sb, cnt))
1388 1388 continue;
  1389 + /* We could race with quotaon or dqget() could have failed */
  1390 + if (!got[cnt])
  1391 + continue;
1389 1392 if (!inode->i_dquot[cnt]) {
1390 1393 inode->i_dquot[cnt] = got[cnt];
1391 1394 got[cnt] = NULL;
... ... @@ -1736,6 +1739,7 @@
1736 1739 qsize_t rsv_space = 0;
1737 1740 struct dquot *transfer_from[MAXQUOTAS] = {};
1738 1741 int cnt, ret = 0;
  1742 + char is_valid[MAXQUOTAS] = {};
1739 1743 char warntype_to[MAXQUOTAS];
1740 1744 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
1741 1745  
1742 1746  
... ... @@ -1757,8 +1761,15 @@
1757 1761 space = cur_space + rsv_space;
1758 1762 /* Build the transfer_from list and check the limits */
1759 1763 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1764 + /*
  1765 + * Skip changes for same uid or gid or for turned off quota-type.
  1766 + */
1760 1767 if (!transfer_to[cnt])
1761 1768 continue;
  1769 + /* Avoid races with quotaoff() */
  1770 + if (!sb_has_quota_active(inode->i_sb, cnt))
  1771 + continue;
  1772 + is_valid[cnt] = 1;
1762 1773 transfer_from[cnt] = inode->i_dquot[cnt];
1763 1774 ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt);
1764 1775 if (ret)
1765 1776  
... ... @@ -1772,12 +1783,8 @@
1772 1783 * Finally perform the needed transfer from transfer_from to transfer_to
1773 1784 */
1774 1785 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1775   - /*
1776   - * Skip changes for same uid or gid or for turned off quota-type.
1777   - */
1778   - if (!transfer_to[cnt])
  1786 + if (!is_valid[cnt])
1779 1787 continue;
1780   -
1781 1788 /* Due to IO error we might not have transfer_from[] structure */
1782 1789 if (transfer_from[cnt]) {
1783 1790 warntype_from_inodes[cnt] =
1784 1791  
1785 1792  
... ... @@ -1801,18 +1808,19 @@
1801 1808  
1802 1809 mark_all_dquot_dirty(transfer_from);
1803 1810 mark_all_dquot_dirty(transfer_to);
1804   - /* Pass back references to put */
1805   - for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1806   - transfer_to[cnt] = transfer_from[cnt];
1807   -warn:
1808 1811 flush_warnings(transfer_to, warntype_to);
1809 1812 flush_warnings(transfer_from, warntype_from_inodes);
1810 1813 flush_warnings(transfer_from, warntype_from_space);
1811   - return ret;
  1814 + /* Pass back references to put */
  1815 + for (cnt = 0; cnt < MAXQUOTAS; cnt++)
  1816 + if (is_valid[cnt])
  1817 + transfer_to[cnt] = transfer_from[cnt];
  1818 + return 0;
1812 1819 over_quota:
1813 1820 spin_unlock(&dq_data_lock);
1814 1821 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1815   - goto warn;
  1822 + flush_warnings(transfer_to, warntype_to);
  1823 + return ret;
1816 1824 }
1817 1825 EXPORT_SYMBOL(__dquot_transfer);
1818 1826  
... ... @@ -22,6 +22,7 @@
22 22 config XFS_QUOTA
23 23 bool "XFS Quota support"
24 24 depends on XFS_FS
  25 + select QUOTACTL
25 26 help
26 27 If you say Y here, you will be able to set limits for disk usage on
27 28 a per user and/or a per group basis under XFS. XFS considers quota