Commit 92b2e5b31dd2ad2c9273578c2289d17f417fe32d

Authored by Christoph Hellwig
Committed by Ben Myers
1 parent 4177af3a8a

xfs: use a normal shrinker for the dquot freelist

Stop reusing dquots from the freelist when allocating new ones directly, and
implement a shrinker that actually follows the specifications for the
interface.  The shrinker implementation is still highly suboptimal at this
point, but we can gradually work on it.
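
For orientation, here is a minimal sketch of the single-callback shrinker contract the new code follows (the pre-3.12 style, before the count/scan split).  The example_cache_* helpers are hypothetical stand-ins for the dquot freelist bookkeeping shown in the diff below; the gfp check, the nr_to_scan handling and the scaled return value mirror the new xfs_qm_shake.

/* Hypothetical cache hooks; the real code walks xfs_Gqm->qm_dqfrlist. */
static int example_cache_count(void);
static void example_cache_reclaim(int nr);

static int
example_cache_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	/* Refuse contexts that may not block or recurse into the filesystem. */
	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;

	/* nr_to_scan == 0 is a query: report the cache size, reclaim nothing. */
	if (sc->nr_to_scan)
		example_cache_reclaim(sc->nr_to_scan);

	/* Report remaining freeable objects, scaled by cache pressure. */
	return (example_cache_count() / 100) * sysctl_vfs_cache_pressure;
}

The registered struct shrinker would point ->shrink at such a callback, typically with ->seeks = DEFAULT_SEEKS.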

This also fixes a bug in the previous lock ordering, where we would take
the hash and dqlist locks inside the freelist lock, against the normal
lock ordering.  This is only solvable by introducing the dispose list,
and thus not when using direct reclaim of unused dquots for new allocations.
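
The dispose-list pattern looks roughly as sketched here; it mirrors the new xfs_qm_shake/xfs_qm_dqreclaim_one pair in the diff.  Candidates are unhooked from the freelist while only the freelist lock is held, and the hash and dqlist locks are taken afterwards, once the freelist lock has been dropped.  All names except the hypothetical example_shrink_dquot_freelist wrapper are taken from the diff; the structure is simplified.

static void
example_shrink_dquot_freelist(
	int			nr_to_scan)
{
	LIST_HEAD		(dispose_list);
	struct xfs_dquot	*dqp;

	/* Phase 1: isolate candidates under the freelist lock only. */
	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
	while (nr_to_scan-- > 0 && !list_empty(&xfs_Gqm->qm_dqfrlist)) {
		dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist,
				       struct xfs_dquot, q_freelist);
		/* moves dqp to dispose_list, or to the list tail if busy */
		xfs_qm_dqreclaim_one(dqp, &dispose_list);
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

	/*
	 * Phase 2: free the isolated dquots.  xfs_qm_dqfree_one() takes the
	 * hash and dqlist locks here, without the freelist lock held, so the
	 * normal lock ordering is preserved.
	 */
	while (!list_empty(&dispose_list)) {
		dqp = list_first_entry(&dispose_list,
				       struct xfs_dquot, q_freelist);
		list_del_init(&dqp->q_freelist);
		xfs_qm_dqfree_one(dqp);
	}
}

Because the isolated dquots are marked XFS_DQ_FREEING before they leave the freelist, lookups cannot touch them again while the remaining locks are taken in their proper order.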

As a side effect, the quota upper bound and used-to-free ratio values in
/proc/fs/xfs/xqm are set to 0, as these values don't make any sense in the
new world order.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>

(cherry picked from commit 04da0c8196ac0b12fb6b84f4b7a51ad2fa56d869)

Showing 6 changed files with 141 additions and 282 deletions

... ... @@ -110,11 +110,5 @@
110 110 extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
111 111 extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
112 112  
113   -static inline int
114   -kmem_shake_allow(gfp_t gfp_mask)
115   -{
116   - return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS));
117   -}
118   -
119 113 #endif /* __XFS_SUPPORT_KMEM_H__ */
... ... @@ -63,82 +63,6 @@
63 63 static struct lock_class_key xfs_dquot_other_class;
64 64  
65 65 /*
66   - * Allocate and initialize a dquot. We don't always allocate fresh memory;
67   - * we try to reclaim a free dquot if the number of incore dquots are above
68   - * a threshold.
69   - * The only field inside the core that gets initialized at this point
70   - * is the d_id field. The idea is to fill in the entire q_core
71   - * when we read in the on disk dquot.
72   - */
73   -STATIC xfs_dquot_t *
74   -xfs_qm_dqinit(
75   - xfs_mount_t *mp,
76   - xfs_dqid_t id,
77   - uint type)
78   -{
79   - xfs_dquot_t *dqp;
80   - boolean_t brandnewdquot;
81   -
82   - brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
83   - dqp->dq_flags = type;
84   - dqp->q_core.d_id = cpu_to_be32(id);
85   - dqp->q_mount = mp;
86   -
87   - /*
88   - * No need to re-initialize these if this is a reclaimed dquot.
89   - */
90   - if (brandnewdquot) {
91   - INIT_LIST_HEAD(&dqp->q_freelist);
92   - mutex_init(&dqp->q_qlock);
93   - init_waitqueue_head(&dqp->q_pinwait);
94   -
95   - /*
96   - * Because we want to use a counting completion, complete
97   - * the flush completion once to allow a single access to
98   - * the flush completion without blocking.
99   - */
100   - init_completion(&dqp->q_flush);
101   - complete(&dqp->q_flush);
102   -
103   - trace_xfs_dqinit(dqp);
104   - } else {
105   - /*
106   - * Only the q_core portion was zeroed in dqreclaim_one().
107   - * So, we need to reset others.
108   - */
109   - dqp->q_nrefs = 0;
110   - dqp->q_blkno = 0;
111   - INIT_LIST_HEAD(&dqp->q_mplist);
112   - INIT_LIST_HEAD(&dqp->q_hashlist);
113   - dqp->q_bufoffset = 0;
114   - dqp->q_fileoffset = 0;
115   - dqp->q_transp = NULL;
116   - dqp->q_gdquot = NULL;
117   - dqp->q_res_bcount = 0;
118   - dqp->q_res_icount = 0;
119   - dqp->q_res_rtbcount = 0;
120   - atomic_set(&dqp->q_pincount, 0);
121   - dqp->q_hash = NULL;
122   - ASSERT(list_empty(&dqp->q_freelist));
123   -
124   - trace_xfs_dqreuse(dqp);
125   - }
126   -
127   - /*
128   - * In either case we need to make sure group quotas have a different
129   - * lock class than user quotas, to make sure lockdep knows we can
130   - * locks of one of each at the same time.
131   - */
132   - if (!(type & XFS_DQ_USER))
133   - lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
134   -
135   - /*
136   - * log item gets initialized later
137   - */
138   - return (dqp);
139   -}
140   -
141   -/*
142 66 * This is called to free all the memory associated with a dquot
143 67 */
144 68 void
... ... @@ -567,7 +491,32 @@
567 491 int error;
568 492 int cancelflags = 0;
569 493  
570   - dqp = xfs_qm_dqinit(mp, id, type);
  494 +
  495 + dqp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
  496 +
  497 + dqp->dq_flags = type;
  498 + dqp->q_core.d_id = cpu_to_be32(id);
  499 + dqp->q_mount = mp;
  500 + INIT_LIST_HEAD(&dqp->q_freelist);
  501 + mutex_init(&dqp->q_qlock);
  502 + init_waitqueue_head(&dqp->q_pinwait);
  503 +
  504 + /*
  505 + * Because we want to use a counting completion, complete
  506 + * the flush completion once to allow a single access to
  507 + * the flush completion without blocking.
  508 + */
  509 + init_completion(&dqp->q_flush);
  510 + complete(&dqp->q_flush);
  511 +
  512 + /*
  513 + * Make sure group quotas have a different lock class than user
  514 + * quotas.
  515 + */
  516 + if (!(type & XFS_DQ_USER))
  517 + lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
  518 +
  519 + atomic_inc(&xfs_Gqm->qm_totaldquots);
571 520  
572 521 trace_xfs_dqread(dqp);
573 522  
... ... @@ -50,7 +50,6 @@
50 50 */
51 51 struct mutex xfs_Gqm_lock;
52 52 struct xfs_qm *xfs_Gqm;
53   -uint ndquot;
54 53  
55 54 kmem_zone_t *qm_dqzone;
56 55 kmem_zone_t *qm_dqtrxzone;
... ... @@ -93,7 +92,6 @@
93 92 goto out_free_udqhash;
94 93  
95 94 hsize /= sizeof(xfs_dqhash_t);
96   - ndquot = hsize << 8;
97 95  
98 96 xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
99 97 xqm->qm_dqhashmask = hsize - 1;
... ... @@ -137,7 +135,6 @@
137 135 xqm->qm_dqtrxzone = qm_dqtrxzone;
138 136  
139 137 atomic_set(&xqm->qm_totaldquots, 0);
140   - xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
141 138 xqm->qm_nrefs = 0;
142 139 return xqm;
143 140  
... ... @@ -1600,215 +1597,149 @@
1600 1597 return 0;
1601 1598 }
1602 1599  
  1600 +STATIC void
  1601 +xfs_qm_dqfree_one(
  1602 + struct xfs_dquot *dqp)
  1603 +{
  1604 + struct xfs_mount *mp = dqp->q_mount;
  1605 + struct xfs_quotainfo *qi = mp->m_quotainfo;
1603 1606  
  1607 + mutex_lock(&dqp->q_hash->qh_lock);
  1608 + list_del_init(&dqp->q_hashlist);
  1609 + dqp->q_hash->qh_version++;
  1610 + mutex_unlock(&dqp->q_hash->qh_lock);
1604 1611  
1605   -/*
1606   - * Pop the least recently used dquot off the freelist and recycle it.
1607   - */
1608   -STATIC struct xfs_dquot *
1609   -xfs_qm_dqreclaim_one(void)
  1612 + mutex_lock(&qi->qi_dqlist_lock);
  1613 + list_del_init(&dqp->q_mplist);
  1614 + qi->qi_dquots--;
  1615 + qi->qi_dqreclaims++;
  1616 + mutex_unlock(&qi->qi_dqlist_lock);
  1617 +
  1618 + xfs_qm_dqdestroy(dqp);
  1619 +}
  1620 +
  1621 +STATIC void
  1622 +xfs_qm_dqreclaim_one(
  1623 + struct xfs_dquot *dqp,
  1624 + struct list_head *dispose_list)
1610 1625 {
1611   - struct xfs_dquot *dqp;
1612   - int restarts = 0;
  1626 + struct xfs_mount *mp = dqp->q_mount;
  1627 + int error;
1613 1628  
1614   - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
1615   -restart:
1616   - list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
1617   - struct xfs_mount *mp = dqp->q_mount;
  1629 + if (!xfs_dqlock_nowait(dqp))
  1630 + goto out_busy;
1618 1631  
1619   - if (!xfs_dqlock_nowait(dqp))
1620   - continue;
  1632 + /*
  1633 + * This dquot has acquired a reference in the meantime remove it from
  1634 + * the freelist and try again.
  1635 + */
  1636 + if (dqp->q_nrefs) {
  1637 + xfs_dqunlock(dqp);
1621 1638  
1622   - /*
1623   - * This dquot has already been grabbed by dqlookup.
1624   - * Remove it from the freelist and try again.
1625   - */
1626   - if (dqp->q_nrefs) {
1627   - trace_xfs_dqreclaim_want(dqp);
1628   - XQM_STATS_INC(xqmstats.xs_qm_dqwants);
  1639 + trace_xfs_dqreclaim_want(dqp);
  1640 + XQM_STATS_INC(xqmstats.xs_qm_dqwants);
1629 1641  
1630   - list_del_init(&dqp->q_freelist);
1631   - xfs_Gqm->qm_dqfrlist_cnt--;
1632   - restarts++;
1633   - goto dqunlock;
1634   - }
  1642 + list_del_init(&dqp->q_freelist);
  1643 + xfs_Gqm->qm_dqfrlist_cnt--;
  1644 + return;
  1645 + }
1635 1646  
1636   - ASSERT(dqp->q_hash);
1637   - ASSERT(!list_empty(&dqp->q_mplist));
  1647 + ASSERT(dqp->q_hash);
  1648 + ASSERT(!list_empty(&dqp->q_mplist));
1638 1649  
1639   - /*
1640   - * Try to grab the flush lock. If this dquot is in the process
1641   - * of getting flushed to disk, we don't want to reclaim it.
1642   - */
1643   - if (!xfs_dqflock_nowait(dqp))
1644   - goto dqunlock;
  1650 + /*
  1651 + * Try to grab the flush lock. If this dquot is in the process of
  1652 + * getting flushed to disk, we don't want to reclaim it.
  1653 + */
  1654 + if (!xfs_dqflock_nowait(dqp))
  1655 + goto out_busy;
1645 1656  
  1657 + /*
  1658 + * We have the flush lock so we know that this is not in the
  1659 + * process of being flushed. So, if this is dirty, flush it
  1660 + * DELWRI so that we don't get a freelist infested with
  1661 + * dirty dquots.
  1662 + */
  1663 + if (XFS_DQ_IS_DIRTY(dqp)) {
  1664 + trace_xfs_dqreclaim_dirty(dqp);
  1665 +
1646 1666 /*
1647   - * We have the flush lock so we know that this is not in the
1648   - * process of being flushed. So, if this is dirty, flush it
1649   - * DELWRI so that we don't get a freelist infested with
1650   - * dirty dquots.
  1667 + * We flush it delayed write, so don't bother releasing the
  1668 + * freelist lock.
1651 1669 */
1652   - if (XFS_DQ_IS_DIRTY(dqp)) {
1653   - int error;
1654   -
1655   - trace_xfs_dqreclaim_dirty(dqp);
1656   -
1657   - /*
1658   - * We flush it delayed write, so don't bother
1659   - * releasing the freelist lock.
1660   - */
1661   - error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
1662   - if (error) {
1663   - xfs_warn(mp, "%s: dquot %p flush failed",
1664   - __func__, dqp);
1665   - }
1666   - goto dqunlock;
  1670 + error = xfs_qm_dqflush(dqp, 0);
  1671 + if (error) {
  1672 + xfs_warn(mp, "%s: dquot %p flush failed",
  1673 + __func__, dqp);
1667 1674 }
1668   - xfs_dqfunlock(dqp);
1669 1675  
1670 1676 /*
1671   - * Prevent lookup now that we are going to reclaim the dquot.
1672   - * Once XFS_DQ_FREEING is set lookup won't touch the dquot,
1673   - * thus we can drop the lock now.
  1677 + * Give the dquot another try on the freelist, as the
  1678 + * flushing will take some time.
1674 1679 */
1675   - dqp->dq_flags |= XFS_DQ_FREEING;
1676   - xfs_dqunlock(dqp);
  1680 + goto out_busy;
  1681 + }
  1682 + xfs_dqfunlock(dqp);
1677 1683  
1678   - mutex_lock(&dqp->q_hash->qh_lock);
1679   - list_del_init(&dqp->q_hashlist);
1680   - dqp->q_hash->qh_version++;
1681   - mutex_unlock(&dqp->q_hash->qh_lock);
  1684 + /*
  1685 + * Prevent lookups now that we are past the point of no return.
  1686 + */
  1687 + dqp->dq_flags |= XFS_DQ_FREEING;
  1688 + xfs_dqunlock(dqp);
1682 1689  
1683   - mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
1684   - list_del_init(&dqp->q_mplist);
1685   - mp->m_quotainfo->qi_dquots--;
1686   - mp->m_quotainfo->qi_dqreclaims++;
1687   - mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
  1690 + ASSERT(dqp->q_nrefs == 0);
  1691 + list_move_tail(&dqp->q_freelist, dispose_list);
  1692 + xfs_Gqm->qm_dqfrlist_cnt--;
1688 1693  
1689   - ASSERT(dqp->q_nrefs == 0);
1690   - list_del_init(&dqp->q_freelist);
1691   - xfs_Gqm->qm_dqfrlist_cnt--;
  1694 + trace_xfs_dqreclaim_done(dqp);
  1695 + XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
  1696 + return;
1692 1697  
1693   - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
1694   - return dqp;
1695   -dqunlock:
1696   - xfs_dqunlock(dqp);
1697   - if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
1698   - break;
1699   - goto restart;
1700   - }
  1698 +out_busy:
  1699 + xfs_dqunlock(dqp);
1701 1700  
1702   - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
1703   - return NULL;
1704   -}
  1701 + /*
  1702 + * Move the dquot to the tail of the list so that we don't spin on it.
  1703 + */
  1704 + list_move_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
1705 1705  
1706   -/*
1707   - * Traverse the freelist of dquots and attempt to reclaim a maximum of
1708   - * 'howmany' dquots. This operation races with dqlookup(), and attempts to
1709   - * favor the lookup function ...
1710   - */
1711   -STATIC int
1712   -xfs_qm_shake_freelist(
1713   - int howmany)
1714   -{
1715   - int nreclaimed = 0;
1716   - xfs_dquot_t *dqp;
1717   -
1718   - if (howmany <= 0)
1719   - return 0;
1720   -
1721   - while (nreclaimed < howmany) {
1722   - dqp = xfs_qm_dqreclaim_one();
1723   - if (!dqp)
1724   - return nreclaimed;
1725   - xfs_qm_dqdestroy(dqp);
1726   - nreclaimed++;
1727   - }
1728   - return nreclaimed;
  1706 + trace_xfs_dqreclaim_busy(dqp);
  1707 + XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
1729 1708 }
1730 1709  
1731   -/*
1732   - * The kmem_shake interface is invoked when memory is running low.
1733   - */
1734   -/* ARGSUSED */
1735 1710 STATIC int
1736 1711 xfs_qm_shake(
1737   - struct shrinker *shrink,
1738   - struct shrink_control *sc)
  1712 + struct shrinker *shrink,
  1713 + struct shrink_control *sc)
1739 1714 {
1740   - int ndqused, nfree, n;
1741   - gfp_t gfp_mask = sc->gfp_mask;
  1715 + int nr_to_scan = sc->nr_to_scan;
  1716 + LIST_HEAD (dispose_list);
  1717 + struct xfs_dquot *dqp;
1742 1718  
1743   - if (!kmem_shake_allow(gfp_mask))
  1719 + if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
1744 1720 return 0;
1745   - if (!xfs_Gqm)
1746   - return 0;
  1721 + if (!nr_to_scan)
  1722 + goto out;
1747 1723  
1748   - nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
1749   - /* incore dquots in all f/s's */
1750   - ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
1751   -
1752   - ASSERT(ndqused >= 0);
1753   -
1754   - if (nfree <= ndqused && nfree < ndquot)
1755   - return 0;
1756   -
1757   - ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */
1758   - n = nfree - ndqused - ndquot; /* # over target */
1759   -
1760   - return xfs_qm_shake_freelist(MAX(nfree, n));
1761   -}
1762   -
1763   -
1764   -/*------------------------------------------------------------------*/
1765   -
1766   -/*
1767   - * Return a new incore dquot. Depending on the number of
1768   - * dquots in the system, we either allocate a new one on the kernel heap,
1769   - * or reclaim a free one.
1770   - * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
1771   - * to reclaim an existing one from the freelist.
1772   - */
1773   -boolean_t
1774   -xfs_qm_dqalloc_incore(
1775   - xfs_dquot_t **O_dqpp)
1776   -{
1777   - xfs_dquot_t *dqp;
1778   -
1779   - /*
1780   - * Check against high water mark to see if we want to pop
1781   - * a nincompoop dquot off the freelist.
1782   - */
1783   - if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
1784   - /*
1785   - * Try to recycle a dquot from the freelist.
1786   - */
1787   - if ((dqp = xfs_qm_dqreclaim_one())) {
1788   - XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
1789   - /*
1790   - * Just zero the core here. The rest will get
1791   - * reinitialized by caller. XXX we shouldn't even
1792   - * do this zero ...
1793   - */
1794   - memset(&dqp->q_core, 0, sizeof(dqp->q_core));
1795   - *O_dqpp = dqp;
1796   - return B_FALSE;
1797   - }
1798   - XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
  1724 + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
  1725 + while (!list_empty(&xfs_Gqm->qm_dqfrlist)) {
  1726 + if (nr_to_scan-- <= 0)
  1727 + break;
  1728 + dqp = list_first_entry(&xfs_Gqm->qm_dqfrlist, struct xfs_dquot,
  1729 + q_freelist);
  1730 + xfs_qm_dqreclaim_one(dqp, &dispose_list);
1799 1731 }
  1732 + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
1800 1733  
1801   - /*
1802   - * Allocate a brand new dquot on the kernel heap and return it
1803   - * to the caller to initialize.
1804   - */
1805   - ASSERT(xfs_Gqm->qm_dqzone != NULL);
1806   - *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
1807   - atomic_inc(&xfs_Gqm->qm_totaldquots);
1808   -
1809   - return B_TRUE;
  1734 + while (!list_empty(&dispose_list)) {
  1735 + dqp = list_first_entry(&dispose_list, struct xfs_dquot,
  1736 + q_freelist);
  1737 + list_del_init(&dqp->q_freelist);
  1738 + xfs_qm_dqfree_one(dqp);
  1739 + }
  1740 +out:
  1741 + return (xfs_Gqm->qm_dqfrlist_cnt / 100) * sysctl_vfs_cache_pressure;
1810 1742 }
1811   -
1812 1743  
1813 1744 /*
1814 1745 * Start a transaction and write the incore superblock changes to
... ... @@ -26,24 +26,12 @@
26 26 struct xfs_qm;
27 27 struct xfs_inode;
28 28  
29   -extern uint ndquot;
30 29 extern struct mutex xfs_Gqm_lock;
31 30 extern struct xfs_qm *xfs_Gqm;
32 31 extern kmem_zone_t *qm_dqzone;
33 32 extern kmem_zone_t *qm_dqtrxzone;
34 33  
35 34 /*
36   - * Ditto, for xfs_qm_dqreclaim_one.
37   - */
38   -#define XFS_QM_RECLAIM_MAX_RESTARTS 4
39   -
40   -/*
41   - * Ideal ratio of free to in use dquots. Quota manager makes an attempt
42   - * to keep this balance.
43   - */
44   -#define XFS_QM_DQFREE_RATIO 2
45   -
46   -/*
47 35 * Dquot hashtable constants/threshold values.
48 36 */
49 37 #define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t))
... ... @@ -74,7 +62,6 @@
74 62 int qm_dqfrlist_cnt;
75 63 atomic_t qm_totaldquots; /* total incore dquots */
76 64 uint qm_nrefs; /* file systems with quota on */
77   - int qm_dqfree_ratio;/* ratio of free to inuse dquots */
78 65 kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */
79 66 kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */
80 67 } xfs_qm_t;
... ... @@ -143,7 +130,6 @@
143 130 extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
144 131  
145 132 /* dquot stuff */
146   -extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
147 133 extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
148 134 extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
149 135  
fs/xfs/xfs_qm_stats.c
... ... @@ -42,9 +42,9 @@
42 42 {
43 43 /* maximum; incore; ratio free to inuse; freelist */
44 44 seq_printf(m, "%d\t%d\t%d\t%u\n",
45   - ndquot,
  45 + 0,
46 46 xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
47   - xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
  47 + 0,
48 48 xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
49 49 return 0;
50 50 }
... ... @@ -733,11 +733,10 @@
733 733 DEFINE_DQUOT_EVENT(xfs_dqadjust);
734 734 DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
735 735 DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
736   -DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
  736 +DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
  737 +DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
737 738 DEFINE_DQUOT_EVENT(xfs_dqattach_found);
738 739 DEFINE_DQUOT_EVENT(xfs_dqattach_get);
739   -DEFINE_DQUOT_EVENT(xfs_dqinit);
740   -DEFINE_DQUOT_EVENT(xfs_dqreuse);
741 740 DEFINE_DQUOT_EVENT(xfs_dqalloc);
742 741 DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
743 742 DEFINE_DQUOT_EVENT(xfs_dqread);