Commit 5dd4056db84387975140ff2568eaa0406f07985e

Authored by Christoph Hellwig
Committed by Jan Kara
1 parent 49792c806d

dquot: cleanup space allocation / freeing routines

Get rid of the alloc_space, free_space, reserve_space, claim_space and
release_rsv dquot operations - they are always called from the filesystem,
and if a filesystem really needs its own version (which none currently
does) it can just call into its own routine directly.

Move the shared logic into the common __dquot_alloc_space,
dquot_claim_space_nodirty and __dquot_free_space low-level methods,
and rationalize the wrappers around them to move as much code as
possible into the block shared between the CONFIG_QUOTA and
!CONFIG_QUOTA cases.  Also rename all these helpers to dquot_*
instead of vfs_dq_*.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
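
For filesystem code the practical change is that the remaining quota helpers
return 0 on success and a negative errno (typically -EDQUOT) on failure, so
callers now propagate that return value instead of hard-coding -EDQUOT as
before.  Below is a minimal sketch of the converted calling pattern using the
helpers this patch adds to include/linux/quotaops.h; the my_fs_* functions are
hypothetical stand-ins for a filesystem's own allocation code and are not part
of this patch.

        #include <linux/fs.h>
        #include <linux/quotaops.h>

        /* Hypothetical filesystem internals, declared only so the sketch is
         * self-contained; they are not from this commit. */
        static int my_fs_do_allocation(struct inode *inode, qsize_t nblocks);
        static int my_fs_map_delalloc(struct inode *inode, qsize_t nblocks);

        static int my_fs_alloc_blocks(struct inode *inode, qsize_t nblocks)
        {
                int err;

                /* New convention: 0 or a negative errno such as -EDQUOT. */
                err = dquot_alloc_block(inode, nblocks);
                if (err)
                        return err;     /* no more hand-rolled -EDQUOT */

                err = my_fs_do_allocation(inode, nblocks);
                if (err)
                        /* Roll back the quota charge on failure. */
                        dquot_free_block(inode, nblocks);
                return err;
        }

        /*
         * Delayed allocation: assumes dquot_reserve_block() was called at
         * write time; at writeout the reservation is claimed or released.
         */
        static int my_fs_writeout(struct inode *inode, qsize_t nblocks)
        {
                int err = my_fs_map_delalloc(inode, nblocks);

                if (err) {
                        dquot_release_reservation_block(inode, nblocks);
                        return err;
                }
                /* Convert the in-memory reservation into real usage. */
                return dquot_claim_block(inode, nblocks);
        }

Note that dquot_free_block() and dquot_release_reservation_block() now return
void, so only the allocation and claim paths need error handling.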

Showing 29 changed files with 258 additions and 383 deletions

Documentation/filesystems/Locking
... ... @@ -462,9 +462,7 @@
462 462 prototypes:
463 463 int (*initialize) (struct inode *, int);
464 464 int (*drop) (struct inode *);
465   - int (*alloc_space) (struct inode *, qsize_t, int);
466 465 int (*alloc_inode) (const struct inode *, unsigned long);
467   - int (*free_space) (struct inode *, qsize_t);
468 466 int (*free_inode) (const struct inode *, unsigned long);
469 467 int (*transfer) (struct inode *, struct iattr *);
470 468 int (*write_dquot) (struct dquot *);
471 469  
... ... @@ -481,9 +479,7 @@
481 479 FS recursion Held locks when called
482 480 initialize: yes maybe dqonoff_sem
483 481 drop: yes -
484   -alloc_space: ->mark_dirty() -
485 482 alloc_inode: ->mark_dirty() -
486   -free_space: ->mark_dirty() -
487 483 free_inode: ->mark_dirty() -
488 484 transfer: yes -
489 485 write_dquot: yes dqonoff_sem or dqptr_sem
... ... @@ -495,7 +491,7 @@
495 491 FS recursion means calling ->quota_read() and ->quota_write() from superblock
496 492 operations.
497 493  
498   -->alloc_space(), ->alloc_inode(), ->free_space(), ->free_inode() are called
  494 +->alloc_inode(), ->free_inode() are called
499 495 only directly by the filesystem and do not call any fs functions only
500 496 the ->mark_dirty() operation.
501 497  
... ... @@ -570,7 +570,7 @@
570 570 error_return:
571 571 brelse(bitmap_bh);
572 572 release_blocks(sb, freed);
573   - vfs_dq_free_block(inode, freed);
  573 + dquot_free_block(inode, freed);
574 574 }
575 575  
576 576 /**
... ... @@ -1236,6 +1236,7 @@
1236 1236 unsigned short windowsz = 0;
1237 1237 unsigned long ngroups;
1238 1238 unsigned long num = *count;
  1239 + int ret;
1239 1240  
1240 1241 *errp = -ENOSPC;
1241 1242 sb = inode->i_sb;
... ... @@ -1247,8 +1248,9 @@
1247 1248 /*
1248 1249 * Check quota for allocation of this block.
1249 1250 */
1250   - if (vfs_dq_alloc_block(inode, num)) {
1251   - *errp = -EDQUOT;
  1251 + ret = dquot_alloc_block(inode, num);
  1252 + if (ret) {
  1253 + *errp = ret;
1252 1254 return 0;
1253 1255 }
1254 1256  
... ... @@ -1409,7 +1411,7 @@
1409 1411  
1410 1412 *errp = 0;
1411 1413 brelse(bitmap_bh);
1412   - vfs_dq_free_block(inode, *count-num);
  1414 + dquot_free_block(inode, *count-num);
1413 1415 *count = num;
1414 1416 return ret_block;
1415 1417  
... ... @@ -1420,7 +1422,7 @@
1420 1422 * Undo the block allocation
1421 1423 */
1422 1424 if (!performed_allocation)
1423   - vfs_dq_free_block(inode, *count);
  1425 + dquot_free_block(inode, *count);
1424 1426 brelse(bitmap_bh);
1425 1427 return 0;
1426 1428 }
... ... @@ -644,8 +644,8 @@
644 644 the inode. */
645 645 ea_bdebug(new_bh, "reusing block");
646 646  
647   - error = -EDQUOT;
648   - if (vfs_dq_alloc_block(inode, 1)) {
  647 + error = dquot_alloc_block(inode, 1);
  648 + if (error) {
649 649 unlock_buffer(new_bh);
650 650 goto cleanup;
651 651 }
... ... @@ -702,7 +702,7 @@
702 702 * as if nothing happened and cleanup the unused block */
703 703 if (error && error != -ENOSPC) {
704 704 if (new_bh && new_bh != old_bh)
705   - vfs_dq_free_block(inode, 1);
  705 + dquot_free_block(inode, 1);
706 706 goto cleanup;
707 707 }
708 708 } else
... ... @@ -734,7 +734,7 @@
734 734 le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
735 735 if (ce)
736 736 mb_cache_entry_release(ce);
737   - vfs_dq_free_block(inode, 1);
  737 + dquot_free_block(inode, 1);
738 738 mark_buffer_dirty(old_bh);
739 739 ea_bdebug(old_bh, "refcount now=%d",
740 740 le32_to_cpu(HDR(old_bh)->h_refcount));
... ... @@ -797,7 +797,7 @@
797 797 mark_buffer_dirty(bh);
798 798 if (IS_SYNC(inode))
799 799 sync_dirty_buffer(bh);
800   - vfs_dq_free_block(inode, 1);
  800 + dquot_free_block(inode, 1);
801 801 }
802 802 EXT2_I(inode)->i_file_acl = 0;
803 803  
... ... @@ -676,7 +676,7 @@
676 676 }
677 677 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
678 678 if (dquot_freed_blocks)
679   - vfs_dq_free_block(inode, dquot_freed_blocks);
  679 + dquot_free_block(inode, dquot_freed_blocks);
680 680 return;
681 681 }
682 682  
... ... @@ -1502,8 +1502,9 @@
1502 1502 /*
1503 1503 * Check quota for allocation of this block.
1504 1504 */
1505   - if (vfs_dq_alloc_block(inode, num)) {
1506   - *errp = -EDQUOT;
  1505 + err = dquot_alloc_block(inode, num);
  1506 + if (err) {
  1507 + *errp = err;
1507 1508 return 0;
1508 1509 }
1509 1510  
... ... @@ -1713,7 +1714,7 @@
1713 1714  
1714 1715 *errp = 0;
1715 1716 brelse(bitmap_bh);
1716   - vfs_dq_free_block(inode, *count-num);
  1717 + dquot_free_block(inode, *count-num);
1717 1718 *count = num;
1718 1719 return ret_block;
1719 1720  
... ... @@ -1728,7 +1729,7 @@
1728 1729 * Undo the block allocation
1729 1730 */
1730 1731 if (!performed_allocation)
1731   - vfs_dq_free_block(inode, *count);
  1732 + dquot_free_block(inode, *count);
1732 1733 brelse(bitmap_bh);
1733 1734 return 0;
1734 1735 }
... ... @@ -3336,7 +3336,7 @@
3336 3336 * i_size has been changed by generic_commit_write() and we thus need
3337 3337 * to include the updated inode in the current transaction.
3338 3338 *
3339   - * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
  3339 + * Also, dquot_alloc_space() will always dirty the inode when blocks
3340 3340 * are allocated to the file.
3341 3341 *
3342 3342 * If the inode is marked synchronous, we don't honour that here - doing
... ... @@ -752,9 +752,7 @@
752 752 static const struct dquot_operations ext3_quota_operations = {
753 753 .initialize = dquot_initialize,
754 754 .drop = dquot_drop,
755   - .alloc_space = dquot_alloc_space,
756 755 .alloc_inode = dquot_alloc_inode,
757   - .free_space = dquot_free_space,
758 756 .free_inode = dquot_free_inode,
759 757 .transfer = dquot_transfer,
760 758 .write_dquot = ext3_write_dquot,
... ... @@ -500,7 +500,7 @@
500 500 error = ext3_journal_dirty_metadata(handle, bh);
501 501 if (IS_SYNC(inode))
502 502 handle->h_sync = 1;
503   - vfs_dq_free_block(inode, 1);
  503 + dquot_free_block(inode, 1);
504 504 ea_bdebug(bh, "refcount now=%d; releasing",
505 505 le32_to_cpu(BHDR(bh)->h_refcount));
506 506 if (ce)
... ... @@ -775,8 +775,8 @@
775 775 else {
776 776 /* The old block is released after updating
777 777 the inode. */
778   - error = -EDQUOT;
779   - if (vfs_dq_alloc_block(inode, 1))
  778 + error = dquot_alloc_block(inode, 1);
  779 + if (error)
780 780 goto cleanup;
781 781 error = ext3_journal_get_write_access(handle,
782 782 new_bh);
... ... @@ -850,7 +850,7 @@
850 850 return error;
851 851  
852 852 cleanup_dquot:
853   - vfs_dq_free_block(inode, 1);
  853 + dquot_free_block(inode, 1);
854 854 goto cleanup;
855 855  
856 856 bad_block:
... ... @@ -1093,9 +1093,9 @@
1093 1093  
1094 1094 /* Update quota subsystem */
1095 1095 if (quota_claim) {
1096   - vfs_dq_claim_block(inode, used);
  1096 + dquot_claim_block(inode, used);
1097 1097 if (mdb_free)
1098   - vfs_dq_release_reservation_block(inode, mdb_free);
  1098 + dquot_release_reservation_block(inode, mdb_free);
1099 1099 } else {
1100 1100 /*
1101 1101 * We did fallocate with an offset that is already delayed
... ... @@ -1106,8 +1106,8 @@
1106 1106 * that
1107 1107 */
1108 1108 if (allocated_meta_blocks)
1109   - vfs_dq_claim_block(inode, allocated_meta_blocks);
1110   - vfs_dq_release_reservation_block(inode, mdb_free + used);
  1109 + dquot_claim_block(inode, allocated_meta_blocks);
  1110 + dquot_release_reservation_block(inode, mdb_free + used);
1111 1111 }
1112 1112  
1113 1113 /*
... ... @@ -1836,6 +1836,7 @@
1836 1836 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1837 1837 struct ext4_inode_info *ei = EXT4_I(inode);
1838 1838 unsigned long md_needed, md_reserved;
  1839 + int ret;
1839 1840  
1840 1841 /*
1841 1842 * recalculate the amount of metadata blocks to reserve
1842 1843  
... ... @@ -1853,11 +1854,12 @@
1853 1854 * later. Real quota accounting is done at pages writeout
1854 1855 * time.
1855 1856 */
1856   - if (vfs_dq_reserve_block(inode, md_needed + 1))
1857   - return -EDQUOT;
  1857 + ret = dquot_reserve_block(inode, md_needed + 1);
  1858 + if (ret)
  1859 + return ret;
1858 1860  
1859 1861 if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
1860   - vfs_dq_release_reservation_block(inode, md_needed + 1);
  1862 + dquot_release_reservation_block(inode, md_needed + 1);
1861 1863 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1862 1864 yield();
1863 1865 goto repeat;
... ... @@ -1914,7 +1916,7 @@
1914 1916  
1915 1917 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1916 1918  
1917   - vfs_dq_release_reservation_block(inode, to_free);
  1919 + dquot_release_reservation_block(inode, to_free);
1918 1920 }
1919 1921  
1920 1922 static void ext4_da_page_release_reservation(struct page *page,
... ... @@ -5641,7 +5643,7 @@
5641 5643 * i_size has been changed by generic_commit_write() and we thus need
5642 5644 * to include the updated inode in the current transaction.
5643 5645 *
5644   - * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
  5646 + * Also, dquot_alloc_block() will always dirty the inode when blocks
5645 5647 * are allocated to the file.
5646 5648 *
5647 5649 * If the inode is marked synchronous, we don't honour that here - doing
... ... @@ -4254,7 +4254,7 @@
4254 4254 return 0;
4255 4255 }
4256 4256 reserv_blks = ar->len;
4257   - while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
  4257 + while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
4258 4258 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4259 4259 ar->len--;
4260 4260 }
... ... @@ -4331,7 +4331,7 @@
4331 4331 kmem_cache_free(ext4_ac_cachep, ac);
4332 4332 out1:
4333 4333 if (inquota && ar->len < inquota)
4334   - vfs_dq_free_block(ar->inode, inquota - ar->len);
  4334 + dquot_free_block(ar->inode, inquota - ar->len);
4335 4335 out3:
4336 4336 if (!ar->len) {
4337 4337 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
... ... @@ -4646,7 +4646,7 @@
4646 4646 sb->s_dirt = 1;
4647 4647 error_return:
4648 4648 if (freed)
4649   - vfs_dq_free_block(inode, freed);
  4649 + dquot_free_block(inode, freed);
4650 4650 brelse(bitmap_bh);
4651 4651 ext4_std_error(sb, err);
4652 4652 if (ac)
... ... @@ -1014,15 +1014,10 @@
1014 1014 static const struct dquot_operations ext4_quota_operations = {
1015 1015 .initialize = dquot_initialize,
1016 1016 .drop = dquot_drop,
1017   - .alloc_space = dquot_alloc_space,
1018   - .reserve_space = dquot_reserve_space,
1019   - .claim_space = dquot_claim_space,
1020   - .release_rsv = dquot_release_reserved_space,
1021 1017 #ifdef CONFIG_QUOTA
1022 1018 .get_reserved_space = ext4_get_reserved_space,
1023 1019 #endif
1024 1020 .alloc_inode = dquot_alloc_inode,
1025   - .free_space = dquot_free_space,
1026 1021 .free_inode = dquot_free_inode,
1027 1022 .transfer = dquot_transfer,
1028 1023 .write_dquot = ext4_write_dquot,
... ... @@ -494,7 +494,7 @@
494 494 error = ext4_handle_dirty_metadata(handle, inode, bh);
495 495 if (IS_SYNC(inode))
496 496 ext4_handle_sync(handle);
497   - vfs_dq_free_block(inode, 1);
  497 + dquot_free_block(inode, 1);
498 498 ea_bdebug(bh, "refcount now=%d; releasing",
499 499 le32_to_cpu(BHDR(bh)->h_refcount));
500 500 if (ce)
... ... @@ -787,8 +787,8 @@
787 787 else {
788 788 /* The old block is released after updating
789 789 the inode. */
790   - error = -EDQUOT;
791   - if (vfs_dq_alloc_block(inode, 1))
  790 + error = dquot_alloc_block(inode, 1);
  791 + if (error)
792 792 goto cleanup;
793 793 error = ext4_journal_get_write_access(handle,
794 794 new_bh);
... ... @@ -876,7 +876,7 @@
876 876 return error;
877 877  
878 878 cleanup_dquot:
879   - vfs_dq_free_block(inode, 1);
  879 + dquot_free_block(inode, 1);
880 880 goto cleanup;
881 881  
882 882 bad_block:
... ... @@ -381,10 +381,10 @@
381 381 * It's time to move the inline table to an external
382 382 * page and begin to build the xtree
383 383 */
384   - if (vfs_dq_alloc_block(ip, sbi->nbperpage))
  384 + if (dquot_alloc_block(ip, sbi->nbperpage))
385 385 goto clean_up;
386 386 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
387   - vfs_dq_free_block(ip, sbi->nbperpage);
  387 + dquot_free_block(ip, sbi->nbperpage);
388 388 goto clean_up;
389 389 }
390 390  
... ... @@ -408,7 +408,7 @@
408 408 memcpy(&jfs_ip->i_dirtable, temp_table,
409 409 sizeof (temp_table));
410 410 dbFree(ip, xaddr, sbi->nbperpage);
411   - vfs_dq_free_block(ip, sbi->nbperpage);
  411 + dquot_free_block(ip, sbi->nbperpage);
412 412 goto clean_up;
413 413 }
414 414 ip->i_size = PSIZE;
415 415  
... ... @@ -1027,10 +1027,9 @@
1027 1027 n = xlen;
1028 1028  
1029 1029 /* Allocate blocks to quota. */
1030   - if (vfs_dq_alloc_block(ip, n)) {
1031   - rc = -EDQUOT;
  1030 + rc = dquot_alloc_block(ip, n);
  1031 + if (rc)
1032 1032 goto extendOut;
1033   - }
1034 1033 quota_allocation += n;
1035 1034  
1036 1035 if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
... ... @@ -1308,7 +1307,7 @@
1308 1307  
1309 1308 /* Rollback quota allocation */
1310 1309 if (rc && quota_allocation)
1311   - vfs_dq_free_block(ip, quota_allocation);
  1310 + dquot_free_block(ip, quota_allocation);
1312 1311  
1313 1312 dtSplitUp_Exit:
1314 1313  
1315 1314  
... ... @@ -1369,9 +1368,10 @@
1369 1368 return -EIO;
1370 1369  
1371 1370 /* Allocate blocks to quota. */
1372   - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
  1371 + rc = dquot_alloc_block(ip, lengthPXD(pxd));
  1372 + if (rc) {
1373 1373 release_metapage(rmp);
1374   - return -EDQUOT;
  1374 + return rc;
1375 1375 }
1376 1376  
1377 1377 jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
... ... @@ -1892,6 +1892,7 @@
1892 1892 struct dt_lock *dtlck;
1893 1893 struct tlock *tlck;
1894 1894 struct lv *lv;
  1895 + int rc;
1895 1896  
1896 1897 /* get split root page */
1897 1898 smp = split->mp;
1898 1899  
... ... @@ -1916,9 +1917,10 @@
1916 1917 rp = rmp->data;
1917 1918  
1918 1919 /* Allocate blocks to quota. */
1919   - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
  1920 + rc = dquot_alloc_block(ip, lengthPXD(pxd));
  1921 + if (rc) {
1920 1922 release_metapage(rmp);
1921   - return -EDQUOT;
  1923 + return rc;
1922 1924 }
1923 1925  
1924 1926 BT_MARK_DIRTY(rmp, ip);
... ... @@ -2287,7 +2289,7 @@
2287 2289 xlen = lengthPXD(&fp->header.self);
2288 2290  
2289 2291 /* Free quota allocation. */
2290   - vfs_dq_free_block(ip, xlen);
  2292 + dquot_free_block(ip, xlen);
2291 2293  
2292 2294 /* free/invalidate its buffer page */
2293 2295 discard_metapage(fmp);
... ... @@ -2363,7 +2365,7 @@
2363 2365 xlen = lengthPXD(&p->header.self);
2364 2366  
2365 2367 /* Free quota allocation */
2366   - vfs_dq_free_block(ip, xlen);
  2368 + dquot_free_block(ip, xlen);
2367 2369  
2368 2370 /* free/invalidate its buffer page */
2369 2371 discard_metapage(mp);
... ... @@ -141,10 +141,11 @@
141 141 }
142 142  
143 143 /* Allocate blocks to quota. */
144   - if (vfs_dq_alloc_block(ip, nxlen)) {
  144 + rc = dquot_alloc_block(ip, nxlen);
  145 + if (rc) {
145 146 dbFree(ip, nxaddr, (s64) nxlen);
146 147 mutex_unlock(&JFS_IP(ip)->commit_mutex);
147   - return -EDQUOT;
  148 + return rc;
148 149 }
149 150  
150 151 /* determine the value of the extent flag */
... ... @@ -164,7 +165,7 @@
164 165 */
165 166 if (rc) {
166 167 dbFree(ip, nxaddr, nxlen);
167   - vfs_dq_free_block(ip, nxlen);
  168 + dquot_free_block(ip, nxlen);
168 169 mutex_unlock(&JFS_IP(ip)->commit_mutex);
169 170 return (rc);
170 171 }
171 172  
... ... @@ -256,10 +257,11 @@
256 257 goto exit;
257 258  
258 259 /* Allocat blocks to quota. */
259   - if (vfs_dq_alloc_block(ip, nxlen)) {
  260 + rc = dquot_alloc_block(ip, nxlen);
  261 + if (rc) {
260 262 dbFree(ip, nxaddr, (s64) nxlen);
261 263 mutex_unlock(&JFS_IP(ip)->commit_mutex);
262   - return -EDQUOT;
  264 + return rc;
263 265 }
264 266  
265 267 delta = nxlen - xlen;
... ... @@ -297,7 +299,7 @@
297 299 /* extend the extent */
298 300 if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
299 301 dbFree(ip, xaddr + xlen, delta);
300   - vfs_dq_free_block(ip, nxlen);
  302 + dquot_free_block(ip, nxlen);
301 303 goto exit;
302 304 }
303 305 } else {
... ... @@ -308,7 +310,7 @@
308 310 */
309 311 if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
310 312 dbFree(ip, nxaddr, nxlen);
311   - vfs_dq_free_block(ip, nxlen);
  313 + dquot_free_block(ip, nxlen);
312 314 goto exit;
313 315 }
314 316 }
... ... @@ -585,10 +585,10 @@
585 585 hint = addressXAD(xad) + lengthXAD(xad) - 1;
586 586 } else
587 587 hint = 0;
588   - if ((rc = vfs_dq_alloc_block(ip, xlen)))
  588 + if ((rc = dquot_alloc_block(ip, xlen)))
589 589 goto out;
590 590 if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
591   - vfs_dq_free_block(ip, xlen);
  591 + dquot_free_block(ip, xlen);
592 592 goto out;
593 593 }
594 594 }
... ... @@ -617,7 +617,7 @@
617 617 /* undo data extent allocation */
618 618 if (*xaddrp == 0) {
619 619 dbFree(ip, xaddr, (s64) xlen);
620   - vfs_dq_free_block(ip, xlen);
  620 + dquot_free_block(ip, xlen);
621 621 }
622 622 return rc;
623 623 }
624 624  
... ... @@ -985,10 +985,9 @@
985 985 rbn = addressPXD(pxd);
986 986  
987 987 /* Allocate blocks to quota. */
988   - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
989   - rc = -EDQUOT;
  988 + rc = dquot_alloc_block(ip, lengthPXD(pxd));
  989 + if (rc)
990 990 goto clean_up;
991   - }
992 991  
993 992 quota_allocation += lengthPXD(pxd);
994 993  
... ... @@ -1195,7 +1194,7 @@
1195 1194  
1196 1195 /* Rollback quota allocation. */
1197 1196 if (quota_allocation)
1198   - vfs_dq_free_block(ip, quota_allocation);
  1197 + dquot_free_block(ip, quota_allocation);
1199 1198  
1200 1199 return (rc);
1201 1200 }
... ... @@ -1235,6 +1234,7 @@
1235 1234 struct pxdlist *pxdlist;
1236 1235 struct tlock *tlck;
1237 1236 struct xtlock *xtlck;
  1237 + int rc;
1238 1238  
1239 1239 sp = &JFS_IP(ip)->i_xtroot;
1240 1240  
1241 1241  
... ... @@ -1252,9 +1252,10 @@
1252 1252 return -EIO;
1253 1253  
1254 1254 /* Allocate blocks to quota. */
1255   - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
  1255 + rc = dquot_alloc_block(ip, lengthPXD(pxd));
  1256 + if (rc) {
1256 1257 release_metapage(rmp);
1257   - return -EDQUOT;
  1258 + return rc;
1258 1259 }
1259 1260  
1260 1261 jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
... ... @@ -3680,7 +3681,7 @@
3680 3681 ip->i_size = newsize;
3681 3682  
3682 3683 /* update quota allocation to reflect freed blocks */
3683   - vfs_dq_free_block(ip, nfreed);
  3684 + dquot_free_block(ip, nfreed);
3684 3685  
3685 3686 /*
3686 3687 * free tlock of invalidated pages
... ... @@ -260,14 +260,14 @@
260 260 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
261 261  
262 262 /* Allocate new blocks to quota. */
263   - if (vfs_dq_alloc_block(ip, nblocks)) {
264   - return -EDQUOT;
265   - }
  263 + rc = dquot_alloc_block(ip, nblocks);
  264 + if (rc)
  265 + return rc;
266 266  
267 267 rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
268 268 if (rc) {
269 269 /*Rollback quota allocation. */
270   - vfs_dq_free_block(ip, nblocks);
  270 + dquot_free_block(ip, nblocks);
271 271 return rc;
272 272 }
273 273  
... ... @@ -332,7 +332,7 @@
332 332  
333 333 failed:
334 334 /* Rollback quota allocation. */
335   - vfs_dq_free_block(ip, nblocks);
  335 + dquot_free_block(ip, nblocks);
336 336  
337 337 dbFree(ip, blkno, nblocks);
338 338 return rc;
... ... @@ -538,7 +538,8 @@
538 538  
539 539 if (blocks_needed > current_blocks) {
540 540 /* Allocate new blocks to quota. */
541   - if (vfs_dq_alloc_block(inode, blocks_needed))
  541 + rc = dquot_alloc_block(inode, blocks_needed);
  542 + if (rc)
542 543 return -EDQUOT;
543 544  
544 545 quota_allocation = blocks_needed;
... ... @@ -602,7 +603,7 @@
602 603 clean_up:
603 604 /* Rollback quota allocation */
604 605 if (quota_allocation)
605   - vfs_dq_free_block(inode, quota_allocation);
  606 + dquot_free_block(inode, quota_allocation);
606 607  
607 608 return (rc);
608 609 }
... ... @@ -677,7 +678,7 @@
677 678  
678 679 /* If old blocks exist, they must be removed from quota allocation. */
679 680 if (old_blocks)
680   - vfs_dq_free_block(inode, old_blocks);
  681 + dquot_free_block(inode, old_blocks);
681 682  
682 683 inode->i_ctime = CURRENT_TIME;
683 684  
... ... @@ -5712,7 +5712,7 @@
5712 5712 goto out;
5713 5713 }
5714 5714  
5715   - vfs_dq_free_space_nodirty(inode,
  5715 + dquot_free_space_nodirty(inode,
5716 5716 ocfs2_clusters_to_bytes(inode->i_sb, len));
5717 5717  
5718 5718 ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc);
... ... @@ -6935,7 +6935,7 @@
6935 6935 goto bail;
6936 6936 }
6937 6937  
6938   - vfs_dq_free_space_nodirty(inode,
  6938 + dquot_free_space_nodirty(inode,
6939 6939 ocfs2_clusters_to_bytes(osb->sb, clusters_to_del));
6940 6940 spin_lock(&OCFS2_I(inode)->ip_lock);
6941 6941 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
6942 6942  
... ... @@ -7300,11 +7300,10 @@
7300 7300 unsigned int page_end;
7301 7301 u64 phys;
7302 7302  
7303   - if (vfs_dq_alloc_space_nodirty(inode,
7304   - ocfs2_clusters_to_bytes(osb->sb, 1))) {
7305   - ret = -EDQUOT;
  7303 + ret = dquot_alloc_space_nodirty(inode,
  7304 + ocfs2_clusters_to_bytes(osb->sb, 1));
  7305 + if (ret)
7306 7306 goto out_commit;
7307   - }
7308 7307 did_quota = 1;
7309 7308  
7310 7309 ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off,
... ... @@ -7380,7 +7379,7 @@
7380 7379  
7381 7380 out_commit:
7382 7381 if (ret < 0 && did_quota)
7383   - vfs_dq_free_space_nodirty(inode,
  7382 + dquot_free_space_nodirty(inode,
7384 7383 ocfs2_clusters_to_bytes(osb->sb, 1));
7385 7384  
7386 7385 ocfs2_commit_trans(osb, handle);
... ... @@ -1763,10 +1763,11 @@
1763 1763  
1764 1764 wc->w_handle = handle;
1765 1765  
1766   - if (clusters_to_alloc && vfs_dq_alloc_space_nodirty(inode,
1767   - ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc))) {
1768   - ret = -EDQUOT;
1769   - goto out_commit;
  1766 + if (clusters_to_alloc) {
  1767 + ret = dquot_alloc_space_nodirty(inode,
  1768 + ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
  1769 + if (ret)
  1770 + goto out_commit;
1770 1771 }
1771 1772 /*
1772 1773 * We don't want this to fail in ocfs2_write_end(), so do it
... ... @@ -1809,7 +1810,7 @@
1809 1810 return 0;
1810 1811 out_quota:
1811 1812 if (clusters_to_alloc)
1812   - vfs_dq_free_space(inode,
  1813 + dquot_free_space(inode,
1813 1814 ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
1814 1815 out_commit:
1815 1816 ocfs2_commit_trans(osb, handle);
... ... @@ -2964,12 +2964,10 @@
2964 2964 goto out;
2965 2965 }
2966 2966  
2967   - if (vfs_dq_alloc_space_nodirty(dir,
2968   - ocfs2_clusters_to_bytes(osb->sb,
2969   - alloc + dx_alloc))) {
2970   - ret = -EDQUOT;
  2967 + ret = dquot_alloc_space_nodirty(dir,
  2968 + ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc));
  2969 + if (ret)
2971 2970 goto out_commit;
2972   - }
2973 2971 did_quota = 1;
2974 2972  
2975 2973 if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) {
... ... @@ -3178,7 +3176,7 @@
3178 3176  
3179 3177 out_commit:
3180 3178 if (ret < 0 && did_quota)
3181   - vfs_dq_free_space_nodirty(dir, bytes_allocated);
  3179 + dquot_free_space_nodirty(dir, bytes_allocated);
3182 3180  
3183 3181 ocfs2_commit_trans(osb, handle);
3184 3182  
3185 3183  
... ... @@ -3221,11 +3219,10 @@
3221 3219 if (extend) {
3222 3220 u32 offset = OCFS2_I(dir)->ip_clusters;
3223 3221  
3224   - if (vfs_dq_alloc_space_nodirty(dir,
3225   - ocfs2_clusters_to_bytes(sb, 1))) {
3226   - status = -EDQUOT;
  3222 + status = dquot_alloc_space_nodirty(dir,
  3223 + ocfs2_clusters_to_bytes(sb, 1));
  3224 + if (status)
3227 3225 goto bail;
3228   - }
3229 3226 did_quota = 1;
3230 3227  
3231 3228 status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset,
... ... @@ -3254,7 +3251,7 @@
3254 3251 status = 0;
3255 3252 bail:
3256 3253 if (did_quota && status < 0)
3257   - vfs_dq_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
  3254 + dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1));
3258 3255 mlog_exit(status);
3259 3256 return status;
3260 3257 }
3261 3258  
... ... @@ -3889,11 +3886,10 @@
3889 3886 goto out;
3890 3887 }
3891 3888  
3892   - if (vfs_dq_alloc_space_nodirty(dir,
3893   - ocfs2_clusters_to_bytes(dir->i_sb, 1))) {
3894   - ret = -EDQUOT;
  3889 + ret = dquot_alloc_space_nodirty(dir,
  3890 + ocfs2_clusters_to_bytes(dir->i_sb, 1));
  3891 + if (ret)
3895 3892 goto out_commit;
3896   - }
3897 3893 did_quota = 1;
3898 3894  
3899 3895 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh,
... ... @@ -3983,7 +3979,7 @@
3983 3979  
3984 3980 out_commit:
3985 3981 if (ret < 0 && did_quota)
3986   - vfs_dq_free_space_nodirty(dir,
  3982 + dquot_free_space_nodirty(dir,
3987 3983 ocfs2_clusters_to_bytes(dir->i_sb, 1));
3988 3984  
3989 3985 ocfs2_commit_trans(osb, handle);
3990 3986  
... ... @@ -4165,11 +4161,10 @@
4165 4161 goto out;
4166 4162 }
4167 4163  
4168   - if (vfs_dq_alloc_space_nodirty(dir,
4169   - ocfs2_clusters_to_bytes(osb->sb, 1))) {
4170   - ret = -EDQUOT;
  4164 + ret = dquot_alloc_space_nodirty(dir,
  4165 + ocfs2_clusters_to_bytes(osb->sb, 1));
  4166 + if (ret)
4171 4167 goto out_commit;
4172   - }
4173 4168 did_quota = 1;
4174 4169  
4175 4170 /*
... ... @@ -4229,7 +4224,7 @@
4229 4224  
4230 4225 out_commit:
4231 4226 if (ret < 0 && did_quota)
4232   - vfs_dq_free_space_nodirty(dir,
  4227 + dquot_free_space_nodirty(dir,
4233 4228 ocfs2_clusters_to_bytes(dir->i_sb, 1));
4234 4229  
4235 4230 ocfs2_commit_trans(osb, handle);
... ... @@ -629,11 +629,10 @@
629 629 }
630 630  
631 631 restarted_transaction:
632   - if (vfs_dq_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb,
633   - clusters_to_add))) {
634   - status = -EDQUOT;
  632 + status = dquot_alloc_space_nodirty(inode,
  633 + ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
  634 + if (status)
635 635 goto leave;
636   - }
637 636 did_quota = 1;
638 637  
639 638 /* reserve a write to the file entry early on - that we if we
... ... @@ -674,7 +673,7 @@
674 673 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
675 674 spin_unlock(&OCFS2_I(inode)->ip_lock);
676 675 /* Release unused quota reservation */
677   - vfs_dq_free_space(inode,
  676 + dquot_free_space(inode,
678 677 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
679 678 did_quota = 0;
680 679  
... ... @@ -710,7 +709,7 @@
710 709  
711 710 leave:
712 711 if (status < 0 && did_quota)
713   - vfs_dq_free_space(inode,
  712 + dquot_free_space(inode,
714 713 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
715 714 if (handle) {
716 715 ocfs2_commit_trans(osb, handle);
... ... @@ -1716,11 +1716,10 @@
1716 1716 u32 offset = 0;
1717 1717  
1718 1718 inode->i_op = &ocfs2_symlink_inode_operations;
1719   - if (vfs_dq_alloc_space_nodirty(inode,
1720   - ocfs2_clusters_to_bytes(osb->sb, 1))) {
1721   - status = -EDQUOT;
  1719 + status = dquot_alloc_space_nodirty(inode,
  1720 + ocfs2_clusters_to_bytes(osb->sb, 1));
  1721 + if (status)
1722 1722 goto bail;
1723   - }
1724 1723 did_quota = 1;
1725 1724 status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
1726 1725 new_fe_bh,
... ... @@ -1788,7 +1787,7 @@
1788 1787 d_instantiate(dentry, inode);
1789 1788 bail:
1790 1789 if (status < 0 && did_quota)
1791   - vfs_dq_free_space_nodirty(inode,
  1790 + dquot_free_space_nodirty(inode,
1792 1791 ocfs2_clusters_to_bytes(osb->sb, 1));
1793 1792 if (status < 0 && did_quota_inode)
1794 1793 vfs_dq_free_inode(inode);
fs/ocfs2/quota_global.c
... ... @@ -853,9 +853,7 @@
853 853 const struct dquot_operations ocfs2_quota_operations = {
854 854 .initialize = dquot_initialize,
855 855 .drop = dquot_drop,
856   - .alloc_space = dquot_alloc_space,
857 856 .alloc_inode = dquot_alloc_inode,
858   - .free_space = dquot_free_space,
859 857 .free_inode = dquot_free_inode,
860 858 .transfer = dquot_transfer,
861 859 .write_dquot = ocfs2_write_dquot,
... ... @@ -1464,28 +1464,29 @@
1464 1464 }
1465 1465  
1466 1466 /*
1467   - * Following four functions update i_blocks+i_bytes fields and
1468   - * quota information (together with appropriate checks)
1469   - * NOTE: We absolutely rely on the fact that caller dirties
1470   - * the inode (usually macros in quotaops.h care about this) and
1471   - * holds a handle for the current transaction so that dquot write and
1472   - * inode write go into the same transaction.
  1467 + * This functions updates i_blocks+i_bytes fields and quota information
  1468 + * (together with appropriate checks).
  1469 + *
  1470 + * NOTE: We absolutely rely on the fact that caller dirties the inode
  1471 + * (usually helpers in quotaops.h care about this) and holds a handle for
  1472 + * the current transaction so that dquot write and inode write go into the
  1473 + * same transaction.
1473 1474 */
1474 1475  
1475 1476 /*
1476 1477 * This operation can block, but only after everything is updated
1477 1478 */
1478 1479 int __dquot_alloc_space(struct inode *inode, qsize_t number,
1479   - int warn, int reserve)
  1480 + int warn, int reserve)
1480 1481 {
1481   - int cnt, ret = QUOTA_OK;
  1482 + int cnt, ret = 0;
1482 1483 char warntype[MAXQUOTAS];
1483 1484  
1484 1485 /*
1485 1486 * First test before acquiring mutex - solves deadlocks when we
1486 1487 * re-enter the quota code and are already holding the mutex
1487 1488 */
1488   - if (IS_NOQUOTA(inode)) {
  1489 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
1489 1490 inode_incr_space(inode, number, reserve);
1490 1491 goto out;
1491 1492 }
... ... @@ -1498,9 +1499,9 @@
1498 1499 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1499 1500 if (!inode->i_dquot[cnt])
1500 1501 continue;
1501   - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
1502   - == NO_QUOTA) {
1503   - ret = NO_QUOTA;
  1502 + if (check_bdq(inode->i_dquot[cnt], number, !warn, warntype+cnt)
  1503 + == NO_QUOTA) {
  1504 + ret = -EDQUOT;
1504 1505 spin_unlock(&dq_data_lock);
1505 1506 goto out_flush_warn;
1506 1507 }
1507 1508  
... ... @@ -1525,19 +1526,8 @@
1525 1526 out:
1526 1527 return ret;
1527 1528 }
  1529 +EXPORT_SYMBOL(__dquot_alloc_space);
1528 1530  
1529   -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1530   -{
1531   - return __dquot_alloc_space(inode, number, warn, 0);
1532   -}
1533   -EXPORT_SYMBOL(dquot_alloc_space);
1534   -
1535   -int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
1536   -{
1537   - return __dquot_alloc_space(inode, number, warn, 1);
1538   -}
1539   -EXPORT_SYMBOL(dquot_reserve_space);
1540   -
1541 1531 /*
1542 1532 * This operation can block, but only after everything is updated
1543 1533 */
... ... @@ -1578,14 +1568,16 @@
1578 1568 }
1579 1569 EXPORT_SYMBOL(dquot_alloc_inode);
1580 1570  
1581   -int dquot_claim_space(struct inode *inode, qsize_t number)
  1571 +/*
  1572 + * Convert in-memory reserved quotas to real consumed quotas
  1573 + */
  1574 +int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1582 1575 {
1583 1576 int cnt;
1584   - int ret = QUOTA_OK;
1585 1577  
1586   - if (IS_NOQUOTA(inode)) {
  1578 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
1587 1579 inode_claim_rsv_space(inode, number);
1588   - goto out;
  1580 + return 0;
1589 1581 }
1590 1582  
1591 1583 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1592 1584  
1593 1585  
1594 1586  
1595 1587  
... ... @@ -1601,24 +1593,23 @@
1601 1593 spin_unlock(&dq_data_lock);
1602 1594 mark_all_dquot_dirty(inode->i_dquot);
1603 1595 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1604   -out:
1605   - return ret;
  1596 + return 0;
1606 1597 }
1607   -EXPORT_SYMBOL(dquot_claim_space);
  1598 +EXPORT_SYMBOL(dquot_claim_space_nodirty);
1608 1599  
1609 1600 /*
1610 1601 * This operation can block, but only after everything is updated
1611 1602 */
1612   -int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
  1603 +void __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
1613 1604 {
1614 1605 unsigned int cnt;
1615 1606 char warntype[MAXQUOTAS];
1616 1607  
1617 1608 /* First test before acquiring mutex - solves deadlocks when we
1618 1609 * re-enter the quota code and are already holding the mutex */
1619   - if (IS_NOQUOTA(inode)) {
  1610 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
1620 1611 inode_decr_space(inode, number, reserve);
1621   - return QUOTA_OK;
  1612 + return;
1622 1613 }
1623 1614  
1624 1615 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
... ... @@ -1641,26 +1632,10 @@
1641 1632 out_unlock:
1642 1633 flush_warnings(inode->i_dquot, warntype);
1643 1634 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1644   - return QUOTA_OK;
1645 1635 }
  1636 +EXPORT_SYMBOL(__dquot_free_space);
1646 1637  
1647   -int dquot_free_space(struct inode *inode, qsize_t number)
1648   -{
1649   - return __dquot_free_space(inode, number, 0);
1650   -}
1651   -EXPORT_SYMBOL(dquot_free_space);
1652   -
1653 1638 /*
1654   - * Release reserved quota space
1655   - */
1656   -void dquot_release_reserved_space(struct inode *inode, qsize_t number)
1657   -{
1658   - __dquot_free_space(inode, number, 1);
1659   -
1660   -}
1661   -EXPORT_SYMBOL(dquot_release_reserved_space);
1662   -
1663   -/*
1664 1639 * This operation can block, but only after everything is updated
1665 1640 */
1666 1641 int dquot_free_inode(const struct inode *inode, qsize_t number)
1667 1642  
... ... @@ -1840,9 +1815,7 @@
1840 1815 const struct dquot_operations dquot_operations = {
1841 1816 .initialize = dquot_initialize,
1842 1817 .drop = dquot_drop,
1843   - .alloc_space = dquot_alloc_space,
1844 1818 .alloc_inode = dquot_alloc_inode,
1845   - .free_space = dquot_free_space,
1846 1819 .free_inode = dquot_free_inode,
1847 1820 .transfer = dquot_transfer,
1848 1821 .write_dquot = dquot_commit,
fs/reiserfs/bitmap.c
... ... @@ -425,7 +425,7 @@
425 425  
426 426 journal_mark_dirty(th, s, sbh);
427 427 if (for_unformatted)
428   - vfs_dq_free_block_nodirty(inode, 1);
  428 + dquot_free_block_nodirty(inode, 1);
429 429 }
430 430  
431 431 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
... ... @@ -1049,7 +1049,7 @@
1049 1049 amount_needed, hint->inode->i_uid);
1050 1050 #endif
1051 1051 quota_ret =
1052   - vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
  1052 + dquot_alloc_block_nodirty(hint->inode, amount_needed);
1053 1053 if (quota_ret) /* Quota exceeded? */
1054 1054 return QUOTA_EXCEEDED;
1055 1055 if (hint->preallocate && hint->prealloc_size) {
... ... @@ -1058,7 +1058,7 @@
1058 1058 "reiserquota: allocating (prealloc) %d blocks id=%u",
1059 1059 hint->prealloc_size, hint->inode->i_uid);
1060 1060 #endif
1061   - quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
  1061 + quota_ret = dquot_prealloc_block_nodirty(hint->inode,
1062 1062 hint->prealloc_size);
1063 1063 if (quota_ret)
1064 1064 hint->preallocate = hint->prealloc_size = 0;
... ... @@ -1092,7 +1092,7 @@
1092 1092 hint->inode->i_uid);
1093 1093 #endif
1094 1094 /* Free not allocated blocks */
1095   - vfs_dq_free_block_nodirty(hint->inode,
  1095 + dquot_free_block_nodirty(hint->inode,
1096 1096 amount_needed + hint->prealloc_size -
1097 1097 nr_allocated);
1098 1098 }
... ... @@ -1125,7 +1125,7 @@
1125 1125 REISERFS_I(hint->inode)->i_prealloc_count,
1126 1126 hint->inode->i_uid);
1127 1127 #endif
1128   - vfs_dq_free_block_nodirty(hint->inode, amount_needed +
  1128 + dquot_free_block_nodirty(hint->inode, amount_needed +
1129 1129 hint->prealloc_size - nr_allocated -
1130 1130 REISERFS_I(hint->inode)->
1131 1131 i_prealloc_count);
... ... @@ -1299,7 +1299,7 @@
1299 1299 "reiserquota delete_item(): freeing %u, id=%u type=%c",
1300 1300 quota_cut_bytes, inode->i_uid, head2type(&s_ih));
1301 1301 #endif
1302   - vfs_dq_free_space_nodirty(inode, quota_cut_bytes);
  1302 + dquot_free_space_nodirty(inode, quota_cut_bytes);
1303 1303  
1304 1304 /* Return deleted body length */
1305 1305 return ret_value;
... ... @@ -1383,7 +1383,7 @@
1383 1383 quota_cut_bytes, inode->i_uid,
1384 1384 key2type(key));
1385 1385 #endif
1386   - vfs_dq_free_space_nodirty(inode,
  1386 + dquot_free_space_nodirty(inode,
1387 1387 quota_cut_bytes);
1388 1388 }
1389 1389 break;
... ... @@ -1733,7 +1733,7 @@
1733 1733 "reiserquota cut_from_item(): freeing %u id=%u type=%c",
1734 1734 quota_cut_bytes, inode->i_uid, '?');
1735 1735 #endif
1736   - vfs_dq_free_space_nodirty(inode, quota_cut_bytes);
  1736 + dquot_free_space_nodirty(inode, quota_cut_bytes);
1737 1737 return ret_value;
1738 1738 }
1739 1739  
1740 1740  
... ... @@ -1968,9 +1968,10 @@
1968 1968 key2type(&(key->on_disk_key)));
1969 1969 #endif
1970 1970  
1971   - if (vfs_dq_alloc_space_nodirty(inode, pasted_size)) {
  1971 + retval = dquot_alloc_space_nodirty(inode, pasted_size);
  1972 + if (retval) {
1972 1973 pathrelse(search_path);
1973   - return -EDQUOT;
  1974 + return retval;
1974 1975 }
1975 1976 init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
1976 1977 pasted_size);
... ... @@ -2024,7 +2025,7 @@
2024 2025 pasted_size, inode->i_uid,
2025 2026 key2type(&(key->on_disk_key)));
2026 2027 #endif
2027   - vfs_dq_free_space_nodirty(inode, pasted_size);
  2028 + dquot_free_space_nodirty(inode, pasted_size);
2028 2029 return retval;
2029 2030 }
2030 2031  
2031 2032  
... ... @@ -2062,9 +2063,10 @@
2062 2063 #endif
2063 2064 /* We can't dirty inode here. It would be immediately written but
2064 2065 * appropriate stat item isn't inserted yet... */
2065   - if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
  2066 + retval = dquot_alloc_space_nodirty(inode, quota_bytes);
  2067 + if (retval) {
2066 2068 pathrelse(path);
2067   - return -EDQUOT;
  2069 + return retval;
2068 2070 }
2069 2071 }
2070 2072 init_tb_struct(th, &s_ins_balance, th->t_super, path,
... ... @@ -2113,7 +2115,7 @@
2113 2115 quota_bytes, inode->i_uid, head2type(ih));
2114 2116 #endif
2115 2117 if (inode)
2116   - vfs_dq_free_space_nodirty(inode, quota_bytes);
  2118 + dquot_free_space_nodirty(inode, quota_bytes);
2117 2119 return retval;
2118 2120 }
... ... @@ -618,9 +618,7 @@
618 618 static const struct dquot_operations reiserfs_quota_operations = {
619 619 .initialize = dquot_initialize,
620 620 .drop = dquot_drop,
621   - .alloc_space = dquot_alloc_space,
622 621 .alloc_inode = dquot_alloc_inode,
623   - .free_space = dquot_free_space,
624 622 .free_inode = dquot_free_inode,
625 623 .transfer = dquot_transfer,
626 624 .write_dquot = reiserfs_write_dquot,
... ... @@ -208,7 +208,7 @@
208 208 ((char *)bh->b_data)[(bit + i) >> 3]);
209 209 } else {
210 210 if (inode)
211   - vfs_dq_free_block(inode, 1);
  211 + dquot_free_block(inode, 1);
212 212 udf_add_free_space(sb, sbi->s_partition, 1);
213 213 }
214 214 }
215 215  
... ... @@ -260,11 +260,11 @@
260 260 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
261 261 if (!udf_test_bit(bit, bh->b_data))
262 262 goto out;
263   - else if (vfs_dq_prealloc_block(inode, 1))
  263 + else if (dquot_prealloc_block(inode, 1))
264 264 goto out;
265 265 else if (!udf_clear_bit(bit, bh->b_data)) {
266 266 udf_debug("bit already cleared for block %d\n", bit);
267   - vfs_dq_free_block(inode, 1);
  267 + dquot_free_block(inode, 1);
268 268 goto out;
269 269 }
270 270 block_count--;
... ... @@ -390,10 +390,14 @@
390 390 /*
391 391 * Check quota for allocation of this block.
392 392 */
393   - if (inode && vfs_dq_alloc_block(inode, 1)) {
394   - mutex_unlock(&sbi->s_alloc_mutex);
395   - *err = -EDQUOT;
396   - return 0;
  393 + if (inode) {
  394 + int ret = dquot_alloc_block(inode, 1);
  395 +
  396 + if (ret) {
  397 + mutex_unlock(&sbi->s_alloc_mutex);
  398 + *err = ret;
  399 + return 0;
  400 + }
397 401 }
398 402  
399 403 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
... ... @@ -449,7 +453,7 @@
449 453 /* We do this up front - There are some error conditions that
450 454 could occure, but.. oh well */
451 455 if (inode)
452   - vfs_dq_free_block(inode, count);
  456 + dquot_free_block(inode, count);
453 457 udf_add_free_space(sb, sbi->s_partition, count);
454 458  
455 459 start = bloc->logicalBlockNum + offset;
... ... @@ -694,7 +698,7 @@
694 698 epos.offset -= adsize;
695 699  
696 700 alloc_count = (elen >> sb->s_blocksize_bits);
697   - if (inode && vfs_dq_prealloc_block(inode,
  701 + if (inode && dquot_prealloc_block(inode,
698 702 alloc_count > block_count ? block_count : alloc_count))
699 703 alloc_count = 0;
700 704 else if (alloc_count > block_count) {
... ... @@ -797,12 +801,13 @@
797 801 newblock = goal_eloc.logicalBlockNum;
798 802 goal_eloc.logicalBlockNum++;
799 803 goal_elen -= sb->s_blocksize;
800   -
801   - if (inode && vfs_dq_alloc_block(inode, 1)) {
802   - brelse(goal_epos.bh);
803   - mutex_unlock(&sbi->s_alloc_mutex);
804   - *err = -EDQUOT;
805   - return 0;
  804 + if (inode) {
  805 + *err = dquot_alloc_block(inode, 1);
  806 + if (*err) {
  807 + brelse(goal_epos.bh);
  808 + mutex_unlock(&sbi->s_alloc_mutex);
  809 + return 0;
  810 + }
806 811 }
807 812  
808 813 if (goal_elen)
... ... @@ -85,7 +85,7 @@
85 85 "bit already cleared for fragment %u", i);
86 86 }
87 87  
88   - vfs_dq_free_block(inode, count);
  88 + dquot_free_block(inode, count);
89 89  
90 90  
91 91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
... ... @@ -195,7 +195,7 @@
195 195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
196 196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
197 197 ufs_clusteracct (sb, ucpi, blkno, 1);
198   - vfs_dq_free_block(inode, uspi->s_fpb);
  198 + dquot_free_block(inode, uspi->s_fpb);
199 199  
200 200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
201 201 uspi->cs_total.cs_nbfree++;
... ... @@ -511,6 +511,7 @@
511 511 struct ufs_cg_private_info * ucpi;
512 512 struct ufs_cylinder_group * ucg;
513 513 unsigned cgno, fragno, fragoff, count, fragsize, i;
  514 + int ret;
514 515  
515 516 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
516 517 (unsigned long long)fragment, oldcount, newcount);
... ... @@ -556,8 +557,9 @@
556 557 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
557 558 for (i = oldcount; i < newcount; i++)
558 559 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
559   - if (vfs_dq_alloc_block(inode, count)) {
560   - *err = -EDQUOT;
  560 + ret = dquot_alloc_block(inode, count);
  561 + if (ret) {
  562 + *err = ret;
561 563 return 0;
562 564 }
563 565  
... ... @@ -596,6 +598,7 @@
596 598 struct ufs_cylinder_group * ucg;
597 599 unsigned oldcg, i, j, k, allocsize;
598 600 u64 result;
  601 + int ret;
599 602  
600 603 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
601 604 inode->i_ino, cgno, (unsigned long long)goal, count);
... ... @@ -664,7 +667,7 @@
664 667 for (i = count; i < uspi->s_fpb; i++)
665 668 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
666 669 i = uspi->s_fpb - count;
667   - vfs_dq_free_block(inode, i);
  670 + dquot_free_block(inode, i);
668 671  
669 672 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
670 673 uspi->cs_total.cs_nffree += i;
... ... @@ -676,8 +679,9 @@
676 679 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
677 680 if (result == INVBLOCK)
678 681 return 0;
679   - if (vfs_dq_alloc_block(inode, count)) {
680   - *err = -EDQUOT;
  682 + ret = dquot_alloc_block(inode, count);
  683 + if (ret) {
  684 + *err = ret;
681 685 return 0;
682 686 }
683 687 for (i = 0; i < count; i++)
... ... @@ -714,6 +718,7 @@
714 718 struct ufs_super_block_first * usb1;
715 719 struct ufs_cylinder_group * ucg;
716 720 u64 result, blkno;
  721 + int ret;
717 722  
718 723 UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
719 724  
... ... @@ -747,8 +752,9 @@
747 752 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
748 753 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
749 754 ufs_clusteracct (sb, ucpi, blkno, -1);
750   - if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
751   - *err = -EDQUOT;
  755 + ret = dquot_alloc_block(inode, uspi->s_fpb);
  756 + if (ret) {
  757 + *err = ret;
752 758 return INVBLOCK;
753 759 }
754 760  
include/linux/quota.h
... ... @@ -297,9 +297,7 @@
297 297 struct dquot_operations {
298 298 int (*initialize) (struct inode *, int);
299 299 int (*drop) (struct inode *);
300   - int (*alloc_space) (struct inode *, qsize_t, int);
301 300 int (*alloc_inode) (const struct inode *, qsize_t);
302   - int (*free_space) (struct inode *, qsize_t);
303 301 int (*free_inode) (const struct inode *, qsize_t);
304 302 int (*transfer) (struct inode *, qid_t *, unsigned long);
305 303 int (*write_dquot) (struct dquot *); /* Ordinary dquot write */
... ... @@ -309,12 +307,6 @@
309 307 int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
310 308 int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
311 309 int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
312   - /* reserve quota for delayed block allocation */
313   - int (*reserve_space) (struct inode *, qsize_t, int);
314   - /* claim reserved quota for delayed alloc */
315   - int (*claim_space) (struct inode *, qsize_t);
316   - /* release rsved quota for delayed alloc */
317   - void (*release_rsv) (struct inode *, qsize_t);
318 310 /* get reserved quota for delayed alloc, value returned is managed by
319 311 * quota code only */
320 312 qsize_t *(*get_reserved_space) (struct inode *);
include/linux/quotaops.h
... ... @@ -33,14 +33,13 @@
33 33 struct dquot *dquot_alloc(struct super_block *sb, int type);
34 34 void dquot_destroy(struct dquot *dquot);
35 35  
36   -int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
  36 +int __dquot_alloc_space(struct inode *inode, qsize_t number,
  37 + int warn, int reserve);
  38 +void __dquot_free_space(struct inode *inode, qsize_t number, int reserve);
  39 +
37 40 int dquot_alloc_inode(const struct inode *inode, qsize_t number);
38 41  
39   -int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
40   -int dquot_claim_space(struct inode *inode, qsize_t number);
41   -void dquot_release_reserved_space(struct inode *inode, qsize_t number);
42   -
43   -int dquot_free_space(struct inode *inode, qsize_t number);
  42 +int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
44 43 int dquot_free_inode(const struct inode *inode, qsize_t number);
45 44  
46 45 int dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask);
... ... @@ -149,60 +148,6 @@
149 148 inode->i_sb->dq_op->initialize(inode, -1);
150 149 }
151 150  
152   -/* The following allocation/freeing/transfer functions *must* be called inside
153   - * a transaction (deadlocks possible otherwise) */
154   -static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
155   -{
156   - if (sb_any_quota_active(inode->i_sb)) {
157   - /* Used space is updated in alloc_space() */
158   - if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA)
159   - return 1;
160   - }
161   - else
162   - inode_add_bytes(inode, nr);
163   - return 0;
164   -}
165   -
166   -static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
167   -{
168   - int ret;
169   - if (!(ret = vfs_dq_prealloc_space_nodirty(inode, nr)))
170   - mark_inode_dirty(inode);
171   - return ret;
172   -}
173   -
174   -static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
175   -{
176   - if (sb_any_quota_active(inode->i_sb)) {
177   - /* Used space is updated in alloc_space() */
178   - if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA)
179   - return 1;
180   - }
181   - else
182   - inode_add_bytes(inode, nr);
183   - return 0;
184   -}
185   -
186   -static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
187   -{
188   - int ret;
189   - if (!(ret = vfs_dq_alloc_space_nodirty(inode, nr)))
190   - mark_inode_dirty(inode);
191   - return ret;
192   -}
193   -
194   -static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
195   -{
196   - if (sb_any_quota_active(inode->i_sb)) {
197   - /* Used space is updated in alloc_space() */
198   - if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
199   - return 1;
200   - }
201   - else
202   - inode_add_rsv_space(inode, nr);
203   - return 0;
204   -}
205   -
206 151 static inline int vfs_dq_alloc_inode(struct inode *inode)
207 152 {
208 153 if (sb_any_quota_active(inode->i_sb)) {
... ... @@ -213,47 +158,6 @@
213 158 return 0;
214 159 }
215 160  
216   -/*
217   - * Convert in-memory reserved quotas to real consumed quotas
218   - */
219   -static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
220   -{
221   - if (sb_any_quota_active(inode->i_sb)) {
222   - if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
223   - return 1;
224   - } else
225   - inode_claim_rsv_space(inode, nr);
226   -
227   - mark_inode_dirty(inode);
228   - return 0;
229   -}
230   -
231   -/*
232   - * Release reserved (in-memory) quotas
233   - */
234   -static inline
235   -void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
236   -{
237   - if (sb_any_quota_active(inode->i_sb))
238   - inode->i_sb->dq_op->release_rsv(inode, nr);
239   - else
240   - inode_sub_rsv_space(inode, nr);
241   -}
242   -
243   -static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
244   -{
245   - if (sb_any_quota_active(inode->i_sb))
246   - inode->i_sb->dq_op->free_space(inode, nr);
247   - else
248   - inode_sub_bytes(inode, nr);
249   -}
250   -
251   -static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
252   -{
253   - vfs_dq_free_space_nodirty(inode, nr);
254   - mark_inode_dirty(inode);
255   -}
256   -
257 161 static inline void vfs_dq_free_inode(struct inode *inode)
258 162 {
259 163 if (sb_any_quota_active(inode->i_sb))
... ... @@ -351,105 +255,109 @@
351 255 return 0;
352 256 }
353 257  
354   -static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
  258 +static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
  259 + int warn, int reserve)
355 260 {
356   - inode_add_bytes(inode, nr);
  261 + if (!reserve)
  262 + inode_add_bytes(inode, number);
357 263 return 0;
358 264 }
359 265  
360   -static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
  266 +static inline void __dquot_free_space(struct inode *inode, qsize_t number,
  267 + int reserve)
361 268 {
362   - vfs_dq_prealloc_space_nodirty(inode, nr);
363   - mark_inode_dirty(inode);
364   - return 0;
  269 + if (!reserve)
  270 + inode_sub_bytes(inode, number);
365 271 }
366 272  
367   -static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
  273 +static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
368 274 {
369   - inode_add_bytes(inode, nr);
  275 + inode_add_bytes(inode, number);
370 276 return 0;
371 277 }
372 278  
373   -static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
  279 +#endif /* CONFIG_QUOTA */
  280 +
  281 +static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
374 282 {
375   - vfs_dq_alloc_space_nodirty(inode, nr);
376   - mark_inode_dirty(inode);
377   - return 0;
  283 + return __dquot_alloc_space(inode, nr, 1, 0);
378 284 }
379 285  
380   -static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
  286 +static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
381 287 {
382   - return 0;
  288 + int ret;
  289 +
  290 + ret = dquot_alloc_space_nodirty(inode, nr);
  291 + if (!ret)
  292 + mark_inode_dirty(inode);
  293 + return ret;
383 294 }
384 295  
385   -static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
  296 +static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr)
386 297 {
387   - return vfs_dq_alloc_space(inode, nr);
  298 + return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits);
388 299 }
389 300  
390   -static inline
391   -int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
  301 +static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
392 302 {
393   - return 0;
  303 + return dquot_alloc_space(inode, nr << inode->i_blkbits);
394 304 }
395 305  
396   -static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
  306 +static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
397 307 {
398   - inode_sub_bytes(inode, nr);
  308 + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0, 0);
399 309 }
400 310  
401   -static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
  311 +static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
402 312 {
403   - vfs_dq_free_space_nodirty(inode, nr);
404   - mark_inode_dirty(inode);
405   -}
  313 + int ret;
406 314  
407   -#endif /* CONFIG_QUOTA */
408   -
409   -static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
410   -{
411   - return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
  315 + ret = dquot_prealloc_block_nodirty(inode, nr);
  316 + if (!ret)
  317 + mark_inode_dirty(inode);
  318 + return ret;
412 319 }
413 320  
414   -static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
  321 +static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
415 322 {
416   - return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
  323 + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1);
417 324 }
418 325  
419   -static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
  326 +static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
420 327 {
421   - return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
422   -}
  328 + int ret;
423 329  
424   -static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
425   -{
426   - return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
  330 + ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
  331 + if (!ret)
  332 + mark_inode_dirty(inode);
  333 + return ret;
427 334 }
428 335  
429   -static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
  336 +static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
430 337 {
431   - return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
  338 + __dquot_free_space(inode, nr, 0);
432 339 }
433 340  
434   -static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
  341 +static inline void dquot_free_space(struct inode *inode, qsize_t nr)
435 342 {
436   - return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
  343 + dquot_free_space_nodirty(inode, nr);
  344 + mark_inode_dirty(inode);
437 345 }
438 346  
439   -static inline
440   -void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
  347 +static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr)
441 348 {
442   - vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
  349 + dquot_free_space_nodirty(inode, nr << inode->i_blkbits);
443 350 }
444 351  
445   -static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
  352 +static inline void dquot_free_block(struct inode *inode, qsize_t nr)
446 353 {
447   - vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
  354 + dquot_free_space(inode, nr << inode->i_blkbits);
448 355 }
449 356  
450   -static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
  357 +static inline void dquot_release_reservation_block(struct inode *inode,
  358 + qsize_t nr)
451 359 {
452   - vfs_dq_free_space(inode, nr << inode->i_blkbits);
  360 + __dquot_free_space(inode, nr << inode->i_blkbits, 1);
453 361 }
454 362  
455 363 #endif /* _LINUX_QUOTAOPS_ */