Commit 7ae4440723a413c7a52edd27f654c34680dd4ea2
Committed by
Ben Myers
1 parent
97e7ade506
Exists in
master
and in
6 other branches
xfs: remove XFS_QMOPT_DQSUSER
Just read the id 0 dquot from disk directly in xfs_qm_init_quotainfo instead of going through dqget and requiring a special flag to not add the dquot to any lists. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Ben Myers <bpm@sgi.com>
Showing 4 changed files with 18 additions and 34 deletions Inline Diff
fs/xfs/xfs_dquot.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2003 Silicon Graphics, Inc. | 2 | * Copyright (c) 2000-2003 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | 6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it would be useful, | 9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | 15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_fs.h" | 19 | #include "xfs_fs.h" |
20 | #include "xfs_bit.h" | 20 | #include "xfs_bit.h" |
21 | #include "xfs_log.h" | 21 | #include "xfs_log.h" |
22 | #include "xfs_inum.h" | 22 | #include "xfs_inum.h" |
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
27 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
28 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
29 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
30 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
31 | #include "xfs_bmap.h" | 31 | #include "xfs_bmap.h" |
32 | #include "xfs_rtalloc.h" | 32 | #include "xfs_rtalloc.h" |
33 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
34 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
35 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
36 | #include "xfs_buf_item.h" | 36 | #include "xfs_buf_item.h" |
37 | #include "xfs_trans_space.h" | 37 | #include "xfs_trans_space.h" |
38 | #include "xfs_trans_priv.h" | 38 | #include "xfs_trans_priv.h" |
39 | #include "xfs_qm.h" | 39 | #include "xfs_qm.h" |
40 | #include "xfs_trace.h" | 40 | #include "xfs_trace.h" |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Lock order: | 43 | * Lock order: |
44 | * | 44 | * |
45 | * ip->i_lock | 45 | * ip->i_lock |
46 | * qh->qh_lock | 46 | * qh->qh_lock |
47 | * qi->qi_dqlist_lock | 47 | * qi->qi_dqlist_lock |
48 | * dquot->q_qlock (xfs_dqlock() and friends) | 48 | * dquot->q_qlock (xfs_dqlock() and friends) |
49 | * dquot->q_flush (xfs_dqflock() and friends) | 49 | * dquot->q_flush (xfs_dqflock() and friends) |
50 | * xfs_Gqm->qm_dqfrlist_lock | 50 | * xfs_Gqm->qm_dqfrlist_lock |
51 | * | 51 | * |
52 | * If two dquots need to be locked the order is user before group/project, | 52 | * If two dquots need to be locked the order is user before group/project, |
53 | * otherwise by the lowest id first, see xfs_dqlock2. | 53 | * otherwise by the lowest id first, see xfs_dqlock2. |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #ifdef DEBUG | 56 | #ifdef DEBUG |
57 | xfs_buftarg_t *xfs_dqerror_target; | 57 | xfs_buftarg_t *xfs_dqerror_target; |
58 | int xfs_do_dqerror; | 58 | int xfs_do_dqerror; |
59 | int xfs_dqreq_num; | 59 | int xfs_dqreq_num; |
60 | int xfs_dqerror_mod = 33; | 60 | int xfs_dqerror_mod = 33; |
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | static struct lock_class_key xfs_dquot_other_class; | 63 | static struct lock_class_key xfs_dquot_other_class; |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Allocate and initialize a dquot. We don't always allocate fresh memory; | 66 | * Allocate and initialize a dquot. We don't always allocate fresh memory; |
67 | * we try to reclaim a free dquot if the number of incore dquots are above | 67 | * we try to reclaim a free dquot if the number of incore dquots are above |
68 | * a threshold. | 68 | * a threshold. |
69 | * The only field inside the core that gets initialized at this point | 69 | * The only field inside the core that gets initialized at this point |
70 | * is the d_id field. The idea is to fill in the entire q_core | 70 | * is the d_id field. The idea is to fill in the entire q_core |
71 | * when we read in the on disk dquot. | 71 | * when we read in the on disk dquot. |
72 | */ | 72 | */ |
73 | STATIC xfs_dquot_t * | 73 | STATIC xfs_dquot_t * |
74 | xfs_qm_dqinit( | 74 | xfs_qm_dqinit( |
75 | xfs_mount_t *mp, | 75 | xfs_mount_t *mp, |
76 | xfs_dqid_t id, | 76 | xfs_dqid_t id, |
77 | uint type) | 77 | uint type) |
78 | { | 78 | { |
79 | xfs_dquot_t *dqp; | 79 | xfs_dquot_t *dqp; |
80 | boolean_t brandnewdquot; | 80 | boolean_t brandnewdquot; |
81 | 81 | ||
82 | brandnewdquot = xfs_qm_dqalloc_incore(&dqp); | 82 | brandnewdquot = xfs_qm_dqalloc_incore(&dqp); |
83 | dqp->dq_flags = type; | 83 | dqp->dq_flags = type; |
84 | dqp->q_core.d_id = cpu_to_be32(id); | 84 | dqp->q_core.d_id = cpu_to_be32(id); |
85 | dqp->q_mount = mp; | 85 | dqp->q_mount = mp; |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * No need to re-initialize these if this is a reclaimed dquot. | 88 | * No need to re-initialize these if this is a reclaimed dquot. |
89 | */ | 89 | */ |
90 | if (brandnewdquot) { | 90 | if (brandnewdquot) { |
91 | INIT_LIST_HEAD(&dqp->q_freelist); | 91 | INIT_LIST_HEAD(&dqp->q_freelist); |
92 | mutex_init(&dqp->q_qlock); | 92 | mutex_init(&dqp->q_qlock); |
93 | init_waitqueue_head(&dqp->q_pinwait); | 93 | init_waitqueue_head(&dqp->q_pinwait); |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * Because we want to use a counting completion, complete | 96 | * Because we want to use a counting completion, complete |
97 | * the flush completion once to allow a single access to | 97 | * the flush completion once to allow a single access to |
98 | * the flush completion without blocking. | 98 | * the flush completion without blocking. |
99 | */ | 99 | */ |
100 | init_completion(&dqp->q_flush); | 100 | init_completion(&dqp->q_flush); |
101 | complete(&dqp->q_flush); | 101 | complete(&dqp->q_flush); |
102 | 102 | ||
103 | trace_xfs_dqinit(dqp); | 103 | trace_xfs_dqinit(dqp); |
104 | } else { | 104 | } else { |
105 | /* | 105 | /* |
106 | * Only the q_core portion was zeroed in dqreclaim_one(). | 106 | * Only the q_core portion was zeroed in dqreclaim_one(). |
107 | * So, we need to reset others. | 107 | * So, we need to reset others. |
108 | */ | 108 | */ |
109 | dqp->q_nrefs = 0; | 109 | dqp->q_nrefs = 0; |
110 | dqp->q_blkno = 0; | 110 | dqp->q_blkno = 0; |
111 | INIT_LIST_HEAD(&dqp->q_mplist); | 111 | INIT_LIST_HEAD(&dqp->q_mplist); |
112 | INIT_LIST_HEAD(&dqp->q_hashlist); | 112 | INIT_LIST_HEAD(&dqp->q_hashlist); |
113 | dqp->q_bufoffset = 0; | 113 | dqp->q_bufoffset = 0; |
114 | dqp->q_fileoffset = 0; | 114 | dqp->q_fileoffset = 0; |
115 | dqp->q_transp = NULL; | 115 | dqp->q_transp = NULL; |
116 | dqp->q_gdquot = NULL; | 116 | dqp->q_gdquot = NULL; |
117 | dqp->q_res_bcount = 0; | 117 | dqp->q_res_bcount = 0; |
118 | dqp->q_res_icount = 0; | 118 | dqp->q_res_icount = 0; |
119 | dqp->q_res_rtbcount = 0; | 119 | dqp->q_res_rtbcount = 0; |
120 | atomic_set(&dqp->q_pincount, 0); | 120 | atomic_set(&dqp->q_pincount, 0); |
121 | dqp->q_hash = NULL; | 121 | dqp->q_hash = NULL; |
122 | ASSERT(list_empty(&dqp->q_freelist)); | 122 | ASSERT(list_empty(&dqp->q_freelist)); |
123 | 123 | ||
124 | trace_xfs_dqreuse(dqp); | 124 | trace_xfs_dqreuse(dqp); |
125 | } | 125 | } |
126 | 126 | ||
127 | /* | 127 | /* |
128 | * In either case we need to make sure group quotas have a different | 128 | * In either case we need to make sure group quotas have a different |
129 | * lock class than user quotas, to make sure lockdep knows we can | 129 | * lock class than user quotas, to make sure lockdep knows we can |
130 | * locks of one of each at the same time. | 130 | * locks of one of each at the same time. |
131 | */ | 131 | */ |
132 | if (!(type & XFS_DQ_USER)) | 132 | if (!(type & XFS_DQ_USER)) |
133 | lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); | 133 | lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * log item gets initialized later | 136 | * log item gets initialized later |
137 | */ | 137 | */ |
138 | return (dqp); | 138 | return (dqp); |
139 | } | 139 | } |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * This is called to free all the memory associated with a dquot | 142 | * This is called to free all the memory associated with a dquot |
143 | */ | 143 | */ |
144 | void | 144 | void |
145 | xfs_qm_dqdestroy( | 145 | xfs_qm_dqdestroy( |
146 | xfs_dquot_t *dqp) | 146 | xfs_dquot_t *dqp) |
147 | { | 147 | { |
148 | ASSERT(list_empty(&dqp->q_freelist)); | 148 | ASSERT(list_empty(&dqp->q_freelist)); |
149 | 149 | ||
150 | mutex_destroy(&dqp->q_qlock); | 150 | mutex_destroy(&dqp->q_qlock); |
151 | kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); | 151 | kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); |
152 | 152 | ||
153 | atomic_dec(&xfs_Gqm->qm_totaldquots); | 153 | atomic_dec(&xfs_Gqm->qm_totaldquots); |
154 | } | 154 | } |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * If default limits are in force, push them into the dquot now. | 157 | * If default limits are in force, push them into the dquot now. |
158 | * We overwrite the dquot limits only if they are zero and this | 158 | * We overwrite the dquot limits only if they are zero and this |
159 | * is not the root dquot. | 159 | * is not the root dquot. |
160 | */ | 160 | */ |
161 | void | 161 | void |
162 | xfs_qm_adjust_dqlimits( | 162 | xfs_qm_adjust_dqlimits( |
163 | xfs_mount_t *mp, | 163 | xfs_mount_t *mp, |
164 | xfs_disk_dquot_t *d) | 164 | xfs_disk_dquot_t *d) |
165 | { | 165 | { |
166 | xfs_quotainfo_t *q = mp->m_quotainfo; | 166 | xfs_quotainfo_t *q = mp->m_quotainfo; |
167 | 167 | ||
168 | ASSERT(d->d_id); | 168 | ASSERT(d->d_id); |
169 | 169 | ||
170 | if (q->qi_bsoftlimit && !d->d_blk_softlimit) | 170 | if (q->qi_bsoftlimit && !d->d_blk_softlimit) |
171 | d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit); | 171 | d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit); |
172 | if (q->qi_bhardlimit && !d->d_blk_hardlimit) | 172 | if (q->qi_bhardlimit && !d->d_blk_hardlimit) |
173 | d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit); | 173 | d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit); |
174 | if (q->qi_isoftlimit && !d->d_ino_softlimit) | 174 | if (q->qi_isoftlimit && !d->d_ino_softlimit) |
175 | d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit); | 175 | d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit); |
176 | if (q->qi_ihardlimit && !d->d_ino_hardlimit) | 176 | if (q->qi_ihardlimit && !d->d_ino_hardlimit) |
177 | d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit); | 177 | d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit); |
178 | if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit) | 178 | if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit) |
179 | d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit); | 179 | d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit); |
180 | if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit) | 180 | if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit) |
181 | d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit); | 181 | d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit); |
182 | } | 182 | } |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * Check the limits and timers of a dquot and start or reset timers | 185 | * Check the limits and timers of a dquot and start or reset timers |
186 | * if necessary. | 186 | * if necessary. |
187 | * This gets called even when quota enforcement is OFF, which makes our | 187 | * This gets called even when quota enforcement is OFF, which makes our |
188 | * life a little less complicated. (We just don't reject any quota | 188 | * life a little less complicated. (We just don't reject any quota |
189 | * reservations in that case, when enforcement is off). | 189 | * reservations in that case, when enforcement is off). |
190 | * We also return 0 as the values of the timers in Q_GETQUOTA calls, when | 190 | * We also return 0 as the values of the timers in Q_GETQUOTA calls, when |
191 | * enforcement's off. | 191 | * enforcement's off. |
192 | * In contrast, warnings are a little different in that they don't | 192 | * In contrast, warnings are a little different in that they don't |
193 | * 'automatically' get started when limits get exceeded. They do | 193 | * 'automatically' get started when limits get exceeded. They do |
194 | * get reset to zero, however, when we find the count to be under | 194 | * get reset to zero, however, when we find the count to be under |
195 | * the soft limit (they are only ever set non-zero via userspace). | 195 | * the soft limit (they are only ever set non-zero via userspace). |
196 | */ | 196 | */ |
197 | void | 197 | void |
198 | xfs_qm_adjust_dqtimers( | 198 | xfs_qm_adjust_dqtimers( |
199 | xfs_mount_t *mp, | 199 | xfs_mount_t *mp, |
200 | xfs_disk_dquot_t *d) | 200 | xfs_disk_dquot_t *d) |
201 | { | 201 | { |
202 | ASSERT(d->d_id); | 202 | ASSERT(d->d_id); |
203 | 203 | ||
204 | #ifdef DEBUG | 204 | #ifdef DEBUG |
205 | if (d->d_blk_hardlimit) | 205 | if (d->d_blk_hardlimit) |
206 | ASSERT(be64_to_cpu(d->d_blk_softlimit) <= | 206 | ASSERT(be64_to_cpu(d->d_blk_softlimit) <= |
207 | be64_to_cpu(d->d_blk_hardlimit)); | 207 | be64_to_cpu(d->d_blk_hardlimit)); |
208 | if (d->d_ino_hardlimit) | 208 | if (d->d_ino_hardlimit) |
209 | ASSERT(be64_to_cpu(d->d_ino_softlimit) <= | 209 | ASSERT(be64_to_cpu(d->d_ino_softlimit) <= |
210 | be64_to_cpu(d->d_ino_hardlimit)); | 210 | be64_to_cpu(d->d_ino_hardlimit)); |
211 | if (d->d_rtb_hardlimit) | 211 | if (d->d_rtb_hardlimit) |
212 | ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= | 212 | ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= |
213 | be64_to_cpu(d->d_rtb_hardlimit)); | 213 | be64_to_cpu(d->d_rtb_hardlimit)); |
214 | #endif | 214 | #endif |
215 | 215 | ||
216 | if (!d->d_btimer) { | 216 | if (!d->d_btimer) { |
217 | if ((d->d_blk_softlimit && | 217 | if ((d->d_blk_softlimit && |
218 | (be64_to_cpu(d->d_bcount) >= | 218 | (be64_to_cpu(d->d_bcount) >= |
219 | be64_to_cpu(d->d_blk_softlimit))) || | 219 | be64_to_cpu(d->d_blk_softlimit))) || |
220 | (d->d_blk_hardlimit && | 220 | (d->d_blk_hardlimit && |
221 | (be64_to_cpu(d->d_bcount) >= | 221 | (be64_to_cpu(d->d_bcount) >= |
222 | be64_to_cpu(d->d_blk_hardlimit)))) { | 222 | be64_to_cpu(d->d_blk_hardlimit)))) { |
223 | d->d_btimer = cpu_to_be32(get_seconds() + | 223 | d->d_btimer = cpu_to_be32(get_seconds() + |
224 | mp->m_quotainfo->qi_btimelimit); | 224 | mp->m_quotainfo->qi_btimelimit); |
225 | } else { | 225 | } else { |
226 | d->d_bwarns = 0; | 226 | d->d_bwarns = 0; |
227 | } | 227 | } |
228 | } else { | 228 | } else { |
229 | if ((!d->d_blk_softlimit || | 229 | if ((!d->d_blk_softlimit || |
230 | (be64_to_cpu(d->d_bcount) < | 230 | (be64_to_cpu(d->d_bcount) < |
231 | be64_to_cpu(d->d_blk_softlimit))) && | 231 | be64_to_cpu(d->d_blk_softlimit))) && |
232 | (!d->d_blk_hardlimit || | 232 | (!d->d_blk_hardlimit || |
233 | (be64_to_cpu(d->d_bcount) < | 233 | (be64_to_cpu(d->d_bcount) < |
234 | be64_to_cpu(d->d_blk_hardlimit)))) { | 234 | be64_to_cpu(d->d_blk_hardlimit)))) { |
235 | d->d_btimer = 0; | 235 | d->d_btimer = 0; |
236 | } | 236 | } |
237 | } | 237 | } |
238 | 238 | ||
239 | if (!d->d_itimer) { | 239 | if (!d->d_itimer) { |
240 | if ((d->d_ino_softlimit && | 240 | if ((d->d_ino_softlimit && |
241 | (be64_to_cpu(d->d_icount) >= | 241 | (be64_to_cpu(d->d_icount) >= |
242 | be64_to_cpu(d->d_ino_softlimit))) || | 242 | be64_to_cpu(d->d_ino_softlimit))) || |
243 | (d->d_ino_hardlimit && | 243 | (d->d_ino_hardlimit && |
244 | (be64_to_cpu(d->d_icount) >= | 244 | (be64_to_cpu(d->d_icount) >= |
245 | be64_to_cpu(d->d_ino_hardlimit)))) { | 245 | be64_to_cpu(d->d_ino_hardlimit)))) { |
246 | d->d_itimer = cpu_to_be32(get_seconds() + | 246 | d->d_itimer = cpu_to_be32(get_seconds() + |
247 | mp->m_quotainfo->qi_itimelimit); | 247 | mp->m_quotainfo->qi_itimelimit); |
248 | } else { | 248 | } else { |
249 | d->d_iwarns = 0; | 249 | d->d_iwarns = 0; |
250 | } | 250 | } |
251 | } else { | 251 | } else { |
252 | if ((!d->d_ino_softlimit || | 252 | if ((!d->d_ino_softlimit || |
253 | (be64_to_cpu(d->d_icount) < | 253 | (be64_to_cpu(d->d_icount) < |
254 | be64_to_cpu(d->d_ino_softlimit))) && | 254 | be64_to_cpu(d->d_ino_softlimit))) && |
255 | (!d->d_ino_hardlimit || | 255 | (!d->d_ino_hardlimit || |
256 | (be64_to_cpu(d->d_icount) < | 256 | (be64_to_cpu(d->d_icount) < |
257 | be64_to_cpu(d->d_ino_hardlimit)))) { | 257 | be64_to_cpu(d->d_ino_hardlimit)))) { |
258 | d->d_itimer = 0; | 258 | d->d_itimer = 0; |
259 | } | 259 | } |
260 | } | 260 | } |
261 | 261 | ||
262 | if (!d->d_rtbtimer) { | 262 | if (!d->d_rtbtimer) { |
263 | if ((d->d_rtb_softlimit && | 263 | if ((d->d_rtb_softlimit && |
264 | (be64_to_cpu(d->d_rtbcount) >= | 264 | (be64_to_cpu(d->d_rtbcount) >= |
265 | be64_to_cpu(d->d_rtb_softlimit))) || | 265 | be64_to_cpu(d->d_rtb_softlimit))) || |
266 | (d->d_rtb_hardlimit && | 266 | (d->d_rtb_hardlimit && |
267 | (be64_to_cpu(d->d_rtbcount) >= | 267 | (be64_to_cpu(d->d_rtbcount) >= |
268 | be64_to_cpu(d->d_rtb_hardlimit)))) { | 268 | be64_to_cpu(d->d_rtb_hardlimit)))) { |
269 | d->d_rtbtimer = cpu_to_be32(get_seconds() + | 269 | d->d_rtbtimer = cpu_to_be32(get_seconds() + |
270 | mp->m_quotainfo->qi_rtbtimelimit); | 270 | mp->m_quotainfo->qi_rtbtimelimit); |
271 | } else { | 271 | } else { |
272 | d->d_rtbwarns = 0; | 272 | d->d_rtbwarns = 0; |
273 | } | 273 | } |
274 | } else { | 274 | } else { |
275 | if ((!d->d_rtb_softlimit || | 275 | if ((!d->d_rtb_softlimit || |
276 | (be64_to_cpu(d->d_rtbcount) < | 276 | (be64_to_cpu(d->d_rtbcount) < |
277 | be64_to_cpu(d->d_rtb_softlimit))) && | 277 | be64_to_cpu(d->d_rtb_softlimit))) && |
278 | (!d->d_rtb_hardlimit || | 278 | (!d->d_rtb_hardlimit || |
279 | (be64_to_cpu(d->d_rtbcount) < | 279 | (be64_to_cpu(d->d_rtbcount) < |
280 | be64_to_cpu(d->d_rtb_hardlimit)))) { | 280 | be64_to_cpu(d->d_rtb_hardlimit)))) { |
281 | d->d_rtbtimer = 0; | 281 | d->d_rtbtimer = 0; |
282 | } | 282 | } |
283 | } | 283 | } |
284 | } | 284 | } |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * initialize a buffer full of dquots and log the whole thing | 287 | * initialize a buffer full of dquots and log the whole thing |
288 | */ | 288 | */ |
289 | STATIC void | 289 | STATIC void |
290 | xfs_qm_init_dquot_blk( | 290 | xfs_qm_init_dquot_blk( |
291 | xfs_trans_t *tp, | 291 | xfs_trans_t *tp, |
292 | xfs_mount_t *mp, | 292 | xfs_mount_t *mp, |
293 | xfs_dqid_t id, | 293 | xfs_dqid_t id, |
294 | uint type, | 294 | uint type, |
295 | xfs_buf_t *bp) | 295 | xfs_buf_t *bp) |
296 | { | 296 | { |
297 | struct xfs_quotainfo *q = mp->m_quotainfo; | 297 | struct xfs_quotainfo *q = mp->m_quotainfo; |
298 | xfs_dqblk_t *d; | 298 | xfs_dqblk_t *d; |
299 | int curid, i; | 299 | int curid, i; |
300 | 300 | ||
301 | ASSERT(tp); | 301 | ASSERT(tp); |
302 | ASSERT(xfs_buf_islocked(bp)); | 302 | ASSERT(xfs_buf_islocked(bp)); |
303 | 303 | ||
304 | d = bp->b_addr; | 304 | d = bp->b_addr; |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * ID of the first dquot in the block - id's are zero based. | 307 | * ID of the first dquot in the block - id's are zero based. |
308 | */ | 308 | */ |
309 | curid = id - (id % q->qi_dqperchunk); | 309 | curid = id - (id % q->qi_dqperchunk); |
310 | ASSERT(curid >= 0); | 310 | ASSERT(curid >= 0); |
311 | memset(d, 0, BBTOB(q->qi_dqchunklen)); | 311 | memset(d, 0, BBTOB(q->qi_dqchunklen)); |
312 | for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) { | 312 | for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) { |
313 | d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); | 313 | d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); |
314 | d->dd_diskdq.d_version = XFS_DQUOT_VERSION; | 314 | d->dd_diskdq.d_version = XFS_DQUOT_VERSION; |
315 | d->dd_diskdq.d_id = cpu_to_be32(curid); | 315 | d->dd_diskdq.d_id = cpu_to_be32(curid); |
316 | d->dd_diskdq.d_flags = type; | 316 | d->dd_diskdq.d_flags = type; |
317 | } | 317 | } |
318 | 318 | ||
319 | xfs_trans_dquot_buf(tp, bp, | 319 | xfs_trans_dquot_buf(tp, bp, |
320 | (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF : | 320 | (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF : |
321 | ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF : | 321 | ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF : |
322 | XFS_BLF_GDQUOT_BUF))); | 322 | XFS_BLF_GDQUOT_BUF))); |
323 | xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); | 323 | xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); |
324 | } | 324 | } |
325 | 325 | ||
326 | 326 | ||
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Allocate a block and fill it with dquots. | 329 | * Allocate a block and fill it with dquots. |
330 | * This is called when the bmapi finds a hole. | 330 | * This is called when the bmapi finds a hole. |
331 | */ | 331 | */ |
332 | STATIC int | 332 | STATIC int |
333 | xfs_qm_dqalloc( | 333 | xfs_qm_dqalloc( |
334 | xfs_trans_t **tpp, | 334 | xfs_trans_t **tpp, |
335 | xfs_mount_t *mp, | 335 | xfs_mount_t *mp, |
336 | xfs_dquot_t *dqp, | 336 | xfs_dquot_t *dqp, |
337 | xfs_inode_t *quotip, | 337 | xfs_inode_t *quotip, |
338 | xfs_fileoff_t offset_fsb, | 338 | xfs_fileoff_t offset_fsb, |
339 | xfs_buf_t **O_bpp) | 339 | xfs_buf_t **O_bpp) |
340 | { | 340 | { |
341 | xfs_fsblock_t firstblock; | 341 | xfs_fsblock_t firstblock; |
342 | xfs_bmap_free_t flist; | 342 | xfs_bmap_free_t flist; |
343 | xfs_bmbt_irec_t map; | 343 | xfs_bmbt_irec_t map; |
344 | int nmaps, error, committed; | 344 | int nmaps, error, committed; |
345 | xfs_buf_t *bp; | 345 | xfs_buf_t *bp; |
346 | xfs_trans_t *tp = *tpp; | 346 | xfs_trans_t *tp = *tpp; |
347 | 347 | ||
348 | ASSERT(tp != NULL); | 348 | ASSERT(tp != NULL); |
349 | 349 | ||
350 | trace_xfs_dqalloc(dqp); | 350 | trace_xfs_dqalloc(dqp); |
351 | 351 | ||
352 | /* | 352 | /* |
353 | * Initialize the bmap freelist prior to calling bmapi code. | 353 | * Initialize the bmap freelist prior to calling bmapi code. |
354 | */ | 354 | */ |
355 | xfs_bmap_init(&flist, &firstblock); | 355 | xfs_bmap_init(&flist, &firstblock); |
356 | xfs_ilock(quotip, XFS_ILOCK_EXCL); | 356 | xfs_ilock(quotip, XFS_ILOCK_EXCL); |
357 | /* | 357 | /* |
358 | * Return if this type of quotas is turned off while we didn't | 358 | * Return if this type of quotas is turned off while we didn't |
359 | * have an inode lock | 359 | * have an inode lock |
360 | */ | 360 | */ |
361 | if (XFS_IS_THIS_QUOTA_OFF(dqp)) { | 361 | if (XFS_IS_THIS_QUOTA_OFF(dqp)) { |
362 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); | 362 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); |
363 | return (ESRCH); | 363 | return (ESRCH); |
364 | } | 364 | } |
365 | 365 | ||
366 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); | 366 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); |
367 | nmaps = 1; | 367 | nmaps = 1; |
368 | error = xfs_bmapi_write(tp, quotip, offset_fsb, | 368 | error = xfs_bmapi_write(tp, quotip, offset_fsb, |
369 | XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, | 369 | XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, |
370 | &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp), | 370 | &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp), |
371 | &map, &nmaps, &flist); | 371 | &map, &nmaps, &flist); |
372 | if (error) | 372 | if (error) |
373 | goto error0; | 373 | goto error0; |
374 | ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); | 374 | ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); |
375 | ASSERT(nmaps == 1); | 375 | ASSERT(nmaps == 1); |
376 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && | 376 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && |
377 | (map.br_startblock != HOLESTARTBLOCK)); | 377 | (map.br_startblock != HOLESTARTBLOCK)); |
378 | 378 | ||
379 | /* | 379 | /* |
380 | * Keep track of the blkno to save a lookup later | 380 | * Keep track of the blkno to save a lookup later |
381 | */ | 381 | */ |
382 | dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); | 382 | dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); |
383 | 383 | ||
384 | /* now we can just get the buffer (there's nothing to read yet) */ | 384 | /* now we can just get the buffer (there's nothing to read yet) */ |
385 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, | 385 | bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, |
386 | dqp->q_blkno, | 386 | dqp->q_blkno, |
387 | mp->m_quotainfo->qi_dqchunklen, | 387 | mp->m_quotainfo->qi_dqchunklen, |
388 | 0); | 388 | 0); |
389 | 389 | ||
390 | error = xfs_buf_geterror(bp); | 390 | error = xfs_buf_geterror(bp); |
391 | if (error) | 391 | if (error) |
392 | goto error1; | 392 | goto error1; |
393 | 393 | ||
394 | /* | 394 | /* |
395 | * Make a chunk of dquots out of this buffer and log | 395 | * Make a chunk of dquots out of this buffer and log |
396 | * the entire thing. | 396 | * the entire thing. |
397 | */ | 397 | */ |
398 | xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), | 398 | xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), |
399 | dqp->dq_flags & XFS_DQ_ALLTYPES, bp); | 399 | dqp->dq_flags & XFS_DQ_ALLTYPES, bp); |
400 | 400 | ||
401 | /* | 401 | /* |
402 | * xfs_bmap_finish() may commit the current transaction and | 402 | * xfs_bmap_finish() may commit the current transaction and |
403 | * start a second transaction if the freelist is not empty. | 403 | * start a second transaction if the freelist is not empty. |
404 | * | 404 | * |
405 | * Since we still want to modify this buffer, we need to | 405 | * Since we still want to modify this buffer, we need to |
406 | * ensure that the buffer is not released on commit of | 406 | * ensure that the buffer is not released on commit of |
407 | * the first transaction and ensure the buffer is added to the | 407 | * the first transaction and ensure the buffer is added to the |
408 | * second transaction. | 408 | * second transaction. |
409 | * | 409 | * |
410 | * If there is only one transaction then don't stop the buffer | 410 | * If there is only one transaction then don't stop the buffer |
411 | * from being released when it commits later on. | 411 | * from being released when it commits later on. |
412 | */ | 412 | */ |
413 | 413 | ||
414 | xfs_trans_bhold(tp, bp); | 414 | xfs_trans_bhold(tp, bp); |
415 | 415 | ||
416 | if ((error = xfs_bmap_finish(tpp, &flist, &committed))) { | 416 | if ((error = xfs_bmap_finish(tpp, &flist, &committed))) { |
417 | goto error1; | 417 | goto error1; |
418 | } | 418 | } |
419 | 419 | ||
420 | if (committed) { | 420 | if (committed) { |
421 | tp = *tpp; | 421 | tp = *tpp; |
422 | xfs_trans_bjoin(tp, bp); | 422 | xfs_trans_bjoin(tp, bp); |
423 | } else { | 423 | } else { |
424 | xfs_trans_bhold_release(tp, bp); | 424 | xfs_trans_bhold_release(tp, bp); |
425 | } | 425 | } |
426 | 426 | ||
427 | *O_bpp = bp; | 427 | *O_bpp = bp; |
428 | return 0; | 428 | return 0; |
429 | 429 | ||
430 | error1: | 430 | error1: |
431 | xfs_bmap_cancel(&flist); | 431 | xfs_bmap_cancel(&flist); |
432 | error0: | 432 | error0: |
433 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); | 433 | xfs_iunlock(quotip, XFS_ILOCK_EXCL); |
434 | 434 | ||
435 | return (error); | 435 | return (error); |
436 | } | 436 | } |
437 | 437 | ||
438 | /* | 438 | /* |
439 | * Maps a dquot to the buffer containing its on-disk version. | 439 | * Maps a dquot to the buffer containing its on-disk version. |
440 | * This returns a ptr to the buffer containing the on-disk dquot | 440 | * This returns a ptr to the buffer containing the on-disk dquot |
441 | * in the bpp param, and a ptr to the on-disk dquot within that buffer | 441 | * in the bpp param, and a ptr to the on-disk dquot within that buffer |
442 | */ | 442 | */ |
443 | STATIC int | 443 | STATIC int |
444 | xfs_qm_dqtobp( | 444 | xfs_qm_dqtobp( |
445 | xfs_trans_t **tpp, | 445 | xfs_trans_t **tpp, |
446 | xfs_dquot_t *dqp, | 446 | xfs_dquot_t *dqp, |
447 | xfs_disk_dquot_t **O_ddpp, | 447 | xfs_disk_dquot_t **O_ddpp, |
448 | xfs_buf_t **O_bpp, | 448 | xfs_buf_t **O_bpp, |
449 | uint flags) | 449 | uint flags) |
450 | { | 450 | { |
451 | xfs_bmbt_irec_t map; | 451 | xfs_bmbt_irec_t map; |
452 | int nmaps = 1, error; | 452 | int nmaps = 1, error; |
453 | xfs_buf_t *bp; | 453 | xfs_buf_t *bp; |
454 | xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); | 454 | xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); |
455 | xfs_mount_t *mp = dqp->q_mount; | 455 | xfs_mount_t *mp = dqp->q_mount; |
456 | xfs_disk_dquot_t *ddq; | 456 | xfs_disk_dquot_t *ddq; |
457 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); | 457 | xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); |
458 | xfs_trans_t *tp = (tpp ? *tpp : NULL); | 458 | xfs_trans_t *tp = (tpp ? *tpp : NULL); |
459 | 459 | ||
460 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; | 460 | dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; |
461 | 461 | ||
462 | xfs_ilock(quotip, XFS_ILOCK_SHARED); | 462 | xfs_ilock(quotip, XFS_ILOCK_SHARED); |
463 | if (XFS_IS_THIS_QUOTA_OFF(dqp)) { | 463 | if (XFS_IS_THIS_QUOTA_OFF(dqp)) { |
464 | /* | 464 | /* |
465 | * Return if this type of quotas is turned off while we | 465 | * Return if this type of quotas is turned off while we |
466 | * didn't have the quota inode lock. | 466 | * didn't have the quota inode lock. |
467 | */ | 467 | */ |
468 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 468 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); |
469 | return ESRCH; | 469 | return ESRCH; |
470 | } | 470 | } |
471 | 471 | ||
472 | /* | 472 | /* |
473 | * Find the block map; no allocations yet | 473 | * Find the block map; no allocations yet |
474 | */ | 474 | */ |
475 | error = xfs_bmapi_read(quotip, dqp->q_fileoffset, | 475 | error = xfs_bmapi_read(quotip, dqp->q_fileoffset, |
476 | XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); | 476 | XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); |
477 | 477 | ||
478 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 478 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); |
479 | if (error) | 479 | if (error) |
480 | return error; | 480 | return error; |
481 | 481 | ||
482 | ASSERT(nmaps == 1); | 482 | ASSERT(nmaps == 1); |
483 | ASSERT(map.br_blockcount == 1); | 483 | ASSERT(map.br_blockcount == 1); |
484 | 484 | ||
485 | /* | 485 | /* |
486 | * Offset of dquot in the (fixed sized) dquot chunk. | 486 | * Offset of dquot in the (fixed sized) dquot chunk. |
487 | */ | 487 | */ |
488 | dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * | 488 | dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * |
489 | sizeof(xfs_dqblk_t); | 489 | sizeof(xfs_dqblk_t); |
490 | 490 | ||
491 | ASSERT(map.br_startblock != DELAYSTARTBLOCK); | 491 | ASSERT(map.br_startblock != DELAYSTARTBLOCK); |
492 | if (map.br_startblock == HOLESTARTBLOCK) { | 492 | if (map.br_startblock == HOLESTARTBLOCK) { |
493 | /* | 493 | /* |
494 | * We don't allocate unless we're asked to | 494 | * We don't allocate unless we're asked to |
495 | */ | 495 | */ |
496 | if (!(flags & XFS_QMOPT_DQALLOC)) | 496 | if (!(flags & XFS_QMOPT_DQALLOC)) |
497 | return ENOENT; | 497 | return ENOENT; |
498 | 498 | ||
499 | ASSERT(tp); | 499 | ASSERT(tp); |
500 | error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, | 500 | error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, |
501 | dqp->q_fileoffset, &bp); | 501 | dqp->q_fileoffset, &bp); |
502 | if (error) | 502 | if (error) |
503 | return error; | 503 | return error; |
504 | tp = *tpp; | 504 | tp = *tpp; |
505 | } else { | 505 | } else { |
506 | trace_xfs_dqtobp_read(dqp); | 506 | trace_xfs_dqtobp_read(dqp); |
507 | 507 | ||
508 | /* | 508 | /* |
509 | * store the blkno etc so that we don't have to do the | 509 | * store the blkno etc so that we don't have to do the |
510 | * mapping all the time | 510 | * mapping all the time |
511 | */ | 511 | */ |
512 | dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); | 512 | dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); |
513 | 513 | ||
514 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, | 514 | error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, |
515 | dqp->q_blkno, | 515 | dqp->q_blkno, |
516 | mp->m_quotainfo->qi_dqchunklen, | 516 | mp->m_quotainfo->qi_dqchunklen, |
517 | 0, &bp); | 517 | 0, &bp); |
518 | if (error || !bp) | 518 | if (error || !bp) |
519 | return XFS_ERROR(error); | 519 | return XFS_ERROR(error); |
520 | } | 520 | } |
521 | 521 | ||
522 | ASSERT(xfs_buf_islocked(bp)); | 522 | ASSERT(xfs_buf_islocked(bp)); |
523 | 523 | ||
524 | /* | 524 | /* |
525 | * calculate the location of the dquot inside the buffer. | 525 | * calculate the location of the dquot inside the buffer. |
526 | */ | 526 | */ |
527 | ddq = bp->b_addr + dqp->q_bufoffset; | 527 | ddq = bp->b_addr + dqp->q_bufoffset; |
528 | 528 | ||
529 | /* | 529 | /* |
530 | * A simple sanity check in case we got a corrupted dquot... | 530 | * A simple sanity check in case we got a corrupted dquot... |
531 | */ | 531 | */ |
532 | error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, | 532 | error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, |
533 | flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), | 533 | flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), |
534 | "dqtobp"); | 534 | "dqtobp"); |
535 | if (error) { | 535 | if (error) { |
536 | if (!(flags & XFS_QMOPT_DQREPAIR)) { | 536 | if (!(flags & XFS_QMOPT_DQREPAIR)) { |
537 | xfs_trans_brelse(tp, bp); | 537 | xfs_trans_brelse(tp, bp); |
538 | return XFS_ERROR(EIO); | 538 | return XFS_ERROR(EIO); |
539 | } | 539 | } |
540 | } | 540 | } |
541 | 541 | ||
542 | *O_bpp = bp; | 542 | *O_bpp = bp; |
543 | *O_ddpp = ddq; | 543 | *O_ddpp = ddq; |
544 | 544 | ||
545 | return (0); | 545 | return (0); |
546 | } | 546 | } |
547 | 547 | ||
548 | 548 | ||
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed.
 *
 * On success returns 0 with a freshly initialised, unreferenced and
 * unhashed dquot in *O_dqpp; the caller owns it and must either publish
 * it (as xfs_qm_dqget() does) or destroy it with xfs_qm_dqdestroy().
 * On failure the partially built dquot is destroyed, *O_dqpp is set to
 * NULL and the error is returned (e.g. ESRCH if this quota type was
 * turned off meanwhile, ENOENT if the dquot is not on disk and DQALLOC
 * was not requested).
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = xfs_qm_dqinit(mp, id, type);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		/*
		 * We may have to allocate the dquot's block on disk, so
		 * set up a permanent transaction with enough log space
		 * for the allocation plus the buffer write.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
				XFS_WRITE_LOG_RES(mp) +
				/*
				 * Round the chunklen up to the next multiple
				 * of 128 (buf log item chunk size)).
				 */
				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
				0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
654 | 654 | ||
/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 *
 * Return protocol:
 *   0  - match found; *O_dqpp is locked, referenced, and moved to the
 *        front of the chain.
 *  -1  - a match was found but it is being torn down (XFS_DQ_FREEING);
 *        *O_dqpp is NULL and the caller should back off and retry.
 *   1  - no match on this chain; *O_dqpp is NULL.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	xfs_dqhash_t		*qh,
	xfs_dquot_t		**O_dqpp)
{
	xfs_dquot_t		*dqp;

	ASSERT(mutex_is_locked(&qh->qh_lock));

	/*
	 * Traverse the hashchain looking for a match
	 */
	list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) != id || dqp->q_mount != mp)
			continue;

		trace_xfs_dqlookup_found(dqp);

		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			/* being reclaimed; caller must retry later */
			*O_dqpp = NULL;
			xfs_dqunlock(dqp);
			return -1;
		}

		dqp->q_nrefs++;

		/*
		 * move the dquot to the front of the hashchain
		 */
		list_move(&dqp->q_hashlist, &qh->qh_list);
		trace_xfs_dqlookup_done(dqp);
		*O_dqpp = dqp;
		return 0;
	}

	*O_dqpp = NULL;
	return 1;
}
708 | 708 | ||
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	xfs_dquot_t	*dqp;
	xfs_dqhash_t	*h;
	uint		version;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}
	h = XFS_DQ_HASH(mp, id, type);

#ifdef DEBUG
	/* Optional error injection for testing the failure paths. */
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		if (type == XFS_DQ_USER)
			ASSERT(ip->i_udquot == NULL);
		else
			ASSERT(ip->i_gdquot == NULL);
	}
#endif

restart:
	mutex_lock(&h->qh_lock);

	/*
	 * Look in the cache (hashtable).
	 * The chain is kept locked during lookup.
	 */
	switch (xfs_qm_dqlookup(mp, id, h, O_dqpp)) {
	case -1:
		/* dquot found but being freed; back off and retry */
		XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
		mutex_unlock(&h->qh_lock);
		delay(1);
		goto restart;
	case 0:
		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
		/*
		 * The dquot was found, moved to the front of the chain,
		 * taken off the freelist if it was on it, and locked
		 * at this point. Just unlock the hashchain and return.
		 */
		ASSERT(*O_dqpp);
		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
		mutex_unlock(&h->qh_lock);
		trace_xfs_dqget_hit(*O_dqpp);
		return 0;	/* success */
	default:
		XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
		break;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * Save the hashchain version stamp, and unlock the chain, so that
	 * we don't keep the lock across a disk read
	 */
	version = h->qh_version;
	mutex_unlock(&h->qh_lock);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	/* Re-take the ilock before inspecting the error so the caller's
	 * locking expectation (inode locked on return) always holds. */
	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	/*
	 * Dquot lock comes after hashlock in the lock ordering
	 */
	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (type == XFS_DQ_USER) {
			if (!XFS_IS_UQUOTA_ON(mp)) {
				/* inode stays locked on return */
				xfs_qm_dqdestroy(dqp);
				return XFS_ERROR(ESRCH);
			}
			if (ip->i_udquot) {
				/* someone else attached one; use theirs */
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_udquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			if (!XFS_IS_OQUOTA_ON(mp)) {
				/* inode stays locked on return */
				xfs_qm_dqdestroy(dqp);
				return XFS_ERROR(ESRCH);
			}
			if (ip->i_gdquot) {
				/* someone else attached one; use theirs */
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_gdquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		}
	}

	/*
	 * Hashlock comes after ilock in lock order
	 */
	mutex_lock(&h->qh_lock);
	if (version != h->qh_version) {
		xfs_dquot_t *tmpdqp;
		/*
		 * Now, see if somebody else put the dquot in the
		 * hashtable before us. This can happen because we didn't
		 * keep the hashchain lock. We don't have to worry about
		 * lock order between the two dquots here since dqp isn't
		 * on any findable lists yet.
		 */
		switch (xfs_qm_dqlookup(mp, id, h, &tmpdqp)) {
		case 0:
		case -1:
			/*
			 * Duplicate found, either in cache or on its way out.
			 * Just throw away the new dquot and start over.
			 */
			if (tmpdqp)
				xfs_qm_dqput(tmpdqp);
			mutex_unlock(&h->qh_lock);
			xfs_qm_dqdestroy(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
			goto restart;
		default:
			break;
		}
	}

	/*
	 * Put the dquot at the beginning of the hash-chain and mp's list
	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
	 */
	ASSERT(mutex_is_locked(&h->qh_lock));
	dqp->q_hash = h;
	list_add(&dqp->q_hashlist, &h->qh_list);
	h->qh_version++;

	/*
	 * Attach this dquot to this filesystem's list of all dquots,
	 * kept inside the mount structure in m_quotainfo field
	 */
	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
	mp->m_quotainfo->qi_dquots++;
	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
	mutex_unlock(&h->qh_lock);
dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
926 | 911 | ||
927 | 912 | ||
/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 *
 * Must be called with the dquot locked; the dquot is unlocked on
 * return. When the last reference goes away the dquot is put on the
 * global freelist rather than destroyed, so it can be reclaimed or
 * reused by a later lookup.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	struct xfs_dquot	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

recurse:
	if (--dqp->q_nrefs > 0) {
		xfs_dqunlock(dqp);
		return;
	}

	trace_xfs_dqput_free(dqp);

	/* Last reference: park the dquot on the freelist (if not there). */
	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
	if (list_empty(&dqp->q_freelist)) {
		list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
		xfs_Gqm->qm_dqfrlist_cnt++;
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		/* take the gdquot lock before dropping ours (hint handoff) */
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp) {
		/* iterative "recursion": drop the gdquot ref the same way */
		dqp = gdqp;
		goto recurse;
	}
}
980 | 965 | ||
/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 *
 * NULL is tolerated so callers can release unconditionally.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}
1003 | 988 | ||
/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip);
		else
			/* re-logged while we raced; leave it in the AIL */
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
1044 | 1029 | ||
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 *
 * Returns 0 on success (or when there is nothing to flush), EIO on
 * forced shutdown or incore corruption, or the error from reading the
 * on-disk dquot buffer.  SYNC_WAIT forces a synchronous write; otherwise
 * the buffer is queued for delayed write.
 */
int
xfs_qm_dqflush(
	xfs_dquot_t		*dqp,
	uint			flags)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	/*
	 * If not dirty, or it's pinned and we are not supposed to block, nada.
	 * The flush lock is still dropped on this early return.
	 */
	if (!XFS_DQ_IS_DIRTY(dqp) ||
	    ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) {
		xfs_dqfunlock(dqp);
		return 0;
	}
	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk!
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		dqp->dq_flags &= ~XFS_DQ_DIRTY;
		xfs_dqfunlock(dqp);
		return XFS_ERROR(EIO);
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error) {
		/* The dquot's backing block must exist by now. */
		ASSERT(error != ENOENT);
		xfs_dqfunlock(dqp);
		return error;
	}

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot..
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		/* In-core corruption: take the whole filesystem down. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	if (flags & SYNC_WAIT)
		error = xfs_bwrite(bp);
	else
		xfs_buf_delwri_queue(bp);

	xfs_buf_relse(bp);

	trace_xfs_dqflush_done(dqp);

	/*
	 * dqp is still locked, but caller is free to unlock it now.
	 */
	return error;

}
1159 | 1144 | ||
1160 | void | 1145 | void |
1161 | xfs_dqunlock( | 1146 | xfs_dqunlock( |
1162 | xfs_dquot_t *dqp) | 1147 | xfs_dquot_t *dqp) |
1163 | { | 1148 | { |
1164 | xfs_dqunlock_nonotify(dqp); | 1149 | xfs_dqunlock_nonotify(dqp); |
1165 | if (dqp->q_logitem.qli_dquot == dqp) { | 1150 | if (dqp->q_logitem.qli_dquot == dqp) { |
1166 | xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp, | 1151 | xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp, |
1167 | &dqp->q_logitem.qli_item); | 1152 | &dqp->q_logitem.qli_item); |
1168 | } | 1153 | } |
1169 | } | 1154 | } |
1170 | 1155 | ||
1171 | /* | 1156 | /* |
1172 | * Lock two xfs_dquot structures. | 1157 | * Lock two xfs_dquot structures. |
1173 | * | 1158 | * |
1174 | * To avoid deadlocks we always lock the quota structure with | 1159 | * To avoid deadlocks we always lock the quota structure with |
1175 | * the lowerd id first. | 1160 | * the lowerd id first. |
1176 | */ | 1161 | */ |
1177 | void | 1162 | void |
1178 | xfs_dqlock2( | 1163 | xfs_dqlock2( |
1179 | xfs_dquot_t *d1, | 1164 | xfs_dquot_t *d1, |
1180 | xfs_dquot_t *d2) | 1165 | xfs_dquot_t *d2) |
1181 | { | 1166 | { |
1182 | if (d1 && d2) { | 1167 | if (d1 && d2) { |
1183 | ASSERT(d1 != d2); | 1168 | ASSERT(d1 != d2); |
1184 | if (be32_to_cpu(d1->q_core.d_id) > | 1169 | if (be32_to_cpu(d1->q_core.d_id) > |
1185 | be32_to_cpu(d2->q_core.d_id)) { | 1170 | be32_to_cpu(d2->q_core.d_id)) { |
1186 | mutex_lock(&d2->q_qlock); | 1171 | mutex_lock(&d2->q_qlock); |
1187 | mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); | 1172 | mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); |
1188 | } else { | 1173 | } else { |
1189 | mutex_lock(&d1->q_qlock); | 1174 | mutex_lock(&d1->q_qlock); |
1190 | mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); | 1175 | mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); |
1191 | } | 1176 | } |
1192 | } else if (d1) { | 1177 | } else if (d1) { |
1193 | mutex_lock(&d1->q_qlock); | 1178 | mutex_lock(&d1->q_qlock); |
1194 | } else if (d2) { | 1179 | } else if (d2) { |
1195 | mutex_lock(&d2->q_qlock); | 1180 | mutex_lock(&d2->q_qlock); |
1196 | } | 1181 | } |
1197 | } | 1182 | } |
1198 | 1183 | ||
/*
 * Take a dquot out of the mount's dqlist as well as the hashlist. This is
 * called via unmount as well as quotaoff, and the purge will always succeed.
 *
 * Lock order here is: dquot lock, then flush lock; afterwards the hash
 * lock, the mount's dqlist lock and the global freelist lock are each
 * taken and dropped in turn (never nested in one another).
 */
void
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_dqhash	*qh = dqp->q_hash;

	xfs_dqlock(dqp);

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		int	error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, SYNC_WAIT);
		if (error)
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		/* Re-take the flush lock dropped by the flush completion. */
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	/* Unhash; bump qh_version so lockless hash walkers restart. */
	mutex_lock(&qh->qh_lock);
	list_del_init(&dqp->q_hashlist);
	qh->qh_version++;
	mutex_unlock(&qh->qh_lock);

	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
	list_del_init(&dqp->q_mplist);
	mp->m_quotainfo->qi_dqreclaims++;
	mp->m_quotainfo->qi_dquots--;
	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
	ASSERT(!list_empty(&dqp->q_freelist));
	list_del_init(&dqp->q_freelist);
	xfs_Gqm->qm_dqfrlist_cnt--;
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

	xfs_qm_dqdestroy(dqp);
}
1277 | 1262 | ||
1278 | /* | 1263 | /* |
1279 | * Give the buffer a little push if it is incore and | 1264 | * Give the buffer a little push if it is incore and |
1280 | * wait on the flush lock. | 1265 | * wait on the flush lock. |
1281 | */ | 1266 | */ |
1282 | void | 1267 | void |
1283 | xfs_dqflock_pushbuf_wait( | 1268 | xfs_dqflock_pushbuf_wait( |
1284 | xfs_dquot_t *dqp) | 1269 | xfs_dquot_t *dqp) |
1285 | { | 1270 | { |
1286 | xfs_mount_t *mp = dqp->q_mount; | 1271 | xfs_mount_t *mp = dqp->q_mount; |
1287 | xfs_buf_t *bp; | 1272 | xfs_buf_t *bp; |
1288 | 1273 | ||
1289 | /* | 1274 | /* |
1290 | * Check to see if the dquot has been flushed delayed | 1275 | * Check to see if the dquot has been flushed delayed |
1291 | * write. If so, grab its buffer and send it | 1276 | * write. If so, grab its buffer and send it |
1292 | * out immediately. We'll be able to acquire | 1277 | * out immediately. We'll be able to acquire |
1293 | * the flush lock when the I/O completes. | 1278 | * the flush lock when the I/O completes. |
1294 | */ | 1279 | */ |
1295 | bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, | 1280 | bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, |
1296 | mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); | 1281 | mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); |
1297 | if (!bp) | 1282 | if (!bp) |
1298 | goto out_lock; | 1283 | goto out_lock; |
1299 | 1284 | ||
1300 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 1285 | if (XFS_BUF_ISDELAYWRITE(bp)) { |
1301 | if (xfs_buf_ispinned(bp)) | 1286 | if (xfs_buf_ispinned(bp)) |
1302 | xfs_log_force(mp, 0); | 1287 | xfs_log_force(mp, 0); |
1303 | xfs_buf_delwri_promote(bp); | 1288 | xfs_buf_delwri_promote(bp); |
1304 | wake_up_process(bp->b_target->bt_task); | 1289 | wake_up_process(bp->b_target->bt_task); |
1305 | } | 1290 | } |
1306 | xfs_buf_relse(bp); | 1291 | xfs_buf_relse(bp); |
1307 | out_lock: | 1292 | out_lock: |
1308 | xfs_dqflock(dqp); | 1293 | xfs_dqflock(dqp); |
fs/xfs/xfs_dquot.h
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | 2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | 6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it would be useful, | 9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | 15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #ifndef __XFS_DQUOT_H__ | 18 | #ifndef __XFS_DQUOT_H__ |
19 | #define __XFS_DQUOT_H__ | 19 | #define __XFS_DQUOT_H__ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * Dquots are structures that hold quota information about a user or a group, | 22 | * Dquots are structures that hold quota information about a user or a group, |
23 | * much like inodes are for files. In fact, dquots share many characteristics | 23 | * much like inodes are for files. In fact, dquots share many characteristics |
24 | * with inodes. However, dquots can also be a centralized resource, relative | 24 | * with inodes. However, dquots can also be a centralized resource, relative |
25 | * to a collection of inodes. In this respect, dquots share some characteristics | 25 | * to a collection of inodes. In this respect, dquots share some characteristics |
26 | * of the superblock. | 26 | * of the superblock. |
27 | * XFS dquots exploit both those in its algorithms. They make every attempt | 27 | * XFS dquots exploit both those in its algorithms. They make every attempt |
28 | * to not be a bottleneck when quotas are on and have minimal impact, if any, | 28 | * to not be a bottleneck when quotas are on and have minimal impact, if any, |
29 | * when quotas are off. | 29 | * when quotas are off. |
30 | */ | 30 | */ |
31 | 31 | ||
/*
 * The hash chain headers (hash buckets).  Each bucket chains the in-core
 * dquots that hash to it; qh_lock protects the list and the counters.
 */
typedef struct xfs_dqhash {
	struct list_head  qh_list;	/* chain of dquots in this bucket */
	struct mutex	  qh_lock;	/* protects list, version, nelems */
	uint		  qh_version;	/* ever increasing version */
	uint		  qh_nelems;	/* number of dquots on the list */
} xfs_dqhash_t;
41 | 41 | ||
42 | struct xfs_mount; | 42 | struct xfs_mount; |
43 | struct xfs_trans; | 43 | struct xfs_trans; |
44 | 44 | ||
/*
 * The incore dquot structure: in-memory quota record for one user,
 * group, or project id (see dq_flags / XFS_DQ_* for which).
 */
typedef struct xfs_dquot {
	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
	struct list_head q_freelist;	/* global free list of dquots */
	struct list_head q_mplist;	/* mount's list of dquots */
	struct list_head q_hashlist;	/* global hash list of dquots */
	xfs_dqhash_t	*q_hash;	/* the hashchain header */
	struct xfs_mount*q_mount;	/* filesystem this relates to */
	struct xfs_trans*q_transp;	/* trans this belongs to currently */
	uint		 q_nrefs;	/* # active refs from inodes */
	xfs_daddr_t	 q_blkno;	/* blkno of dquot buffer */
	int		 q_bufoffset;	/* off of dq in buffer (# dquots) */
	xfs_fileoff_t	 q_fileoffset;	/* offset in quotas file */

	struct xfs_dquot*q_gdquot;	/* group dquot, hint only */
	xfs_disk_dquot_t q_core;	/* actual usage & quotas */
	xfs_dq_logitem_t q_logitem;	/* dquot log item */
	xfs_qcnt_t	 q_res_bcount;	/* total regular nblks used+reserved */
	xfs_qcnt_t	 q_res_icount;	/* total inos allocd+reserved */
	xfs_qcnt_t	 q_res_rtbcount;/* total realtime blks used+reserved */
	struct mutex	 q_qlock;	/* quota lock */
	struct completion q_flush;	/* flush completion queue */
	atomic_t	  q_pincount;	/* dquot pin count */
	wait_queue_head_t q_pinwait;	/* dquot pinning wait queue */
} xfs_dquot_t;
72 | 72 | ||
/*
 * Lock hierarchy (lockdep subclasses) for q_qlock:
 * XFS_QLOCK_NORMAL is the implicit default,
 * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
 */
enum {
	XFS_QLOCK_NORMAL = 0,
	XFS_QLOCK_NESTED,
};
82 | 82 | ||
/*
 * Manage the q_flush completion queue embedded in the dquot. This completion
 * queue synchronizes processes attempting to flush the in-core dquot back to
 * disk.
 */
/* Acquire the flush lock, blocking until it is available. */
static inline void xfs_dqflock(xfs_dquot_t *dqp)
{
	wait_for_completion(&dqp->q_flush);
}
92 | 92 | ||
/* Try to acquire the flush lock; returns non-zero on success. */
static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
{
	return try_wait_for_completion(&dqp->q_flush);
}
97 | 97 | ||
/* Release the flush lock, waking any waiter in xfs_dqflock(). */
static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
{
	complete(&dqp->q_flush);
}
102 | 102 | ||
/* Try to take the dquot lock; returns non-zero if it was acquired. */
static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
{
	return mutex_trylock(&dqp->q_qlock);
}
107 | 107 | ||
/* Take the dquot lock, sleeping if necessary. */
static inline void xfs_dqlock(struct xfs_dquot *dqp)
{
	mutex_lock(&dqp->q_qlock);
}
112 | 112 | ||
/* Drop the dquot lock without notifying the AIL (see xfs_dqunlock()). */
static inline void xfs_dqunlock_nonotify(struct xfs_dquot *dqp)
{
	mutex_unlock(&dqp->q_qlock);
}
117 | 117 | ||
118 | #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) | 118 | #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) |
119 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) | 119 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) |
120 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) | 120 | #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) |
121 | #define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ) | 121 | #define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ) |
122 | #define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP) | 122 | #define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP) |
123 | #define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo) | 123 | #define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo) |
124 | #define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \ | 124 | #define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \ |
125 | XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \ | 125 | XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \ |
126 | XFS_DQ_TO_QINF(dqp)->qi_gquotaip) | 126 | XFS_DQ_TO_QINF(dqp)->qi_gquotaip) |
127 | 127 | ||
128 | #define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \ | 128 | #define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \ |
129 | (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ | 129 | (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ |
130 | (XFS_IS_OQUOTA_ON((d)->q_mount)))) | 130 | (XFS_IS_OQUOTA_ON((d)->q_mount)))) |
131 | 131 | ||
132 | extern int xfs_qm_dqread(struct xfs_mount *, xfs_dqid_t, uint, | ||
133 | uint, struct xfs_dquot **); | ||
132 | extern void xfs_qm_dqdestroy(xfs_dquot_t *); | 134 | extern void xfs_qm_dqdestroy(xfs_dquot_t *); |
133 | extern int xfs_qm_dqflush(xfs_dquot_t *, uint); | 135 | extern int xfs_qm_dqflush(xfs_dquot_t *, uint); |
134 | extern void xfs_qm_dqpurge(xfs_dquot_t *); | 136 | extern void xfs_qm_dqpurge(xfs_dquot_t *); |
135 | extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); | 137 | extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); |
136 | extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, | 138 | extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, |
137 | xfs_disk_dquot_t *); | 139 | xfs_disk_dquot_t *); |
138 | extern void xfs_qm_adjust_dqlimits(xfs_mount_t *, | 140 | extern void xfs_qm_adjust_dqlimits(xfs_mount_t *, |
139 | xfs_disk_dquot_t *); | 141 | xfs_disk_dquot_t *); |
140 | extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, | 142 | extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, |
141 | xfs_dqid_t, uint, uint, xfs_dquot_t **); | 143 | xfs_dqid_t, uint, uint, xfs_dquot_t **); |
142 | extern void xfs_qm_dqput(xfs_dquot_t *); | 144 | extern void xfs_qm_dqput(xfs_dquot_t *); |
143 | 145 | ||
144 | extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *); | 146 | extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *); |
145 | extern void xfs_dqunlock(struct xfs_dquot *); | 147 | extern void xfs_dqunlock(struct xfs_dquot *); |
146 | extern void xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp); | 148 | extern void xfs_dqflock_pushbuf_wait(struct xfs_dquot *dqp); |
147 | 149 | ||
/*
 * Take an additional reference on a dquot the caller already has a
 * reference to, and return it for convenient chaining.  The dquot lock
 * is taken and dropped internally to serialise the q_nrefs update.
 */
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
	xfs_dqlock(dqp);
	dqp->q_nrefs++;
	xfs_dqunlock(dqp);
	return dqp;
}
155 | 157 | ||
156 | #endif /* __XFS_DQUOT_H__ */ | 158 | #endif /* __XFS_DQUOT_H__ */ |
157 | 159 |
fs/xfs/xfs_qm.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | 2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | 6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it would be useful, | 9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | 15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_fs.h" | 19 | #include "xfs_fs.h" |
20 | #include "xfs_bit.h" | 20 | #include "xfs_bit.h" |
21 | #include "xfs_log.h" | 21 | #include "xfs_log.h" |
22 | #include "xfs_inum.h" | 22 | #include "xfs_inum.h" |
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
27 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
28 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
29 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
30 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
31 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
32 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
33 | #include "xfs_ialloc.h" | 33 | #include "xfs_ialloc.h" |
34 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
35 | #include "xfs_rtalloc.h" | 35 | #include "xfs_rtalloc.h" |
36 | #include "xfs_error.h" | 36 | #include "xfs_error.h" |
37 | #include "xfs_bmap.h" | 37 | #include "xfs_bmap.h" |
38 | #include "xfs_attr.h" | 38 | #include "xfs_attr.h" |
39 | #include "xfs_buf_item.h" | 39 | #include "xfs_buf_item.h" |
40 | #include "xfs_trans_space.h" | 40 | #include "xfs_trans_space.h" |
41 | #include "xfs_utils.h" | 41 | #include "xfs_utils.h" |
42 | #include "xfs_qm.h" | 42 | #include "xfs_qm.h" |
43 | #include "xfs_trace.h" | 43 | #include "xfs_trace.h" |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * The global quota manager. There is only one of these for the entire | 46 | * The global quota manager. There is only one of these for the entire |
47 | * system, _not_ one per file system. XQM keeps track of the overall | 47 | * system, _not_ one per file system. XQM keeps track of the overall |
48 | * quota functionality, including maintaining the freelist and hash | 48 | * quota functionality, including maintaining the freelist and hash |
49 | * tables of dquots. | 49 | * tables of dquots. |
50 | */ | 50 | */ |
51 | struct mutex xfs_Gqm_lock; | 51 | struct mutex xfs_Gqm_lock; |
52 | struct xfs_qm *xfs_Gqm; | 52 | struct xfs_qm *xfs_Gqm; |
53 | uint ndquot; | 53 | uint ndquot; |
54 | 54 | ||
55 | kmem_zone_t *qm_dqzone; | 55 | kmem_zone_t *qm_dqzone; |
56 | kmem_zone_t *qm_dqtrxzone; | 56 | kmem_zone_t *qm_dqtrxzone; |
57 | 57 | ||
58 | STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); | 58 | STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); |
59 | STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); | 59 | STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); |
60 | 60 | ||
61 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); | 61 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); |
62 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); | 62 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); |
63 | STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); | 63 | STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); |
64 | 64 | ||
65 | static struct shrinker xfs_qm_shaker = { | 65 | static struct shrinker xfs_qm_shaker = { |
66 | .shrink = xfs_qm_shake, | 66 | .shrink = xfs_qm_shake, |
67 | .seeks = DEFAULT_SEEKS, | 67 | .seeks = DEFAULT_SEEKS, |
68 | }; | 68 | }; |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Initialize the XQM structure. | 71 | * Initialize the XQM structure. |
72 | * Note that there is not one quota manager per file system. | 72 | * Note that there is not one quota manager per file system. |
73 | */ | 73 | */ |
74 | STATIC struct xfs_qm * | 74 | STATIC struct xfs_qm * |
75 | xfs_Gqm_init(void) | 75 | xfs_Gqm_init(void) |
76 | { | 76 | { |
77 | xfs_dqhash_t *udqhash, *gdqhash; | 77 | xfs_dqhash_t *udqhash, *gdqhash; |
78 | xfs_qm_t *xqm; | 78 | xfs_qm_t *xqm; |
79 | size_t hsize; | 79 | size_t hsize; |
80 | uint i; | 80 | uint i; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Initialize the dquot hash tables. | 83 | * Initialize the dquot hash tables. |
84 | */ | 84 | */ |
85 | udqhash = kmem_zalloc_greedy(&hsize, | 85 | udqhash = kmem_zalloc_greedy(&hsize, |
86 | XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), | 86 | XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), |
87 | XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t)); | 87 | XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t)); |
88 | if (!udqhash) | 88 | if (!udqhash) |
89 | goto out; | 89 | goto out; |
90 | 90 | ||
91 | gdqhash = kmem_zalloc_large(hsize); | 91 | gdqhash = kmem_zalloc_large(hsize); |
92 | if (!gdqhash) | 92 | if (!gdqhash) |
93 | goto out_free_udqhash; | 93 | goto out_free_udqhash; |
94 | 94 | ||
95 | hsize /= sizeof(xfs_dqhash_t); | 95 | hsize /= sizeof(xfs_dqhash_t); |
96 | ndquot = hsize << 8; | 96 | ndquot = hsize << 8; |
97 | 97 | ||
98 | xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); | 98 | xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); |
99 | xqm->qm_dqhashmask = hsize - 1; | 99 | xqm->qm_dqhashmask = hsize - 1; |
100 | xqm->qm_usr_dqhtable = udqhash; | 100 | xqm->qm_usr_dqhtable = udqhash; |
101 | xqm->qm_grp_dqhtable = gdqhash; | 101 | xqm->qm_grp_dqhtable = gdqhash; |
102 | ASSERT(xqm->qm_usr_dqhtable != NULL); | 102 | ASSERT(xqm->qm_usr_dqhtable != NULL); |
103 | ASSERT(xqm->qm_grp_dqhtable != NULL); | 103 | ASSERT(xqm->qm_grp_dqhtable != NULL); |
104 | 104 | ||
105 | for (i = 0; i < hsize; i++) { | 105 | for (i = 0; i < hsize; i++) { |
106 | xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); | 106 | xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); |
107 | xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); | 107 | xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); |
108 | } | 108 | } |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * Freelist of all dquots of all file systems | 111 | * Freelist of all dquots of all file systems |
112 | */ | 112 | */ |
113 | INIT_LIST_HEAD(&xqm->qm_dqfrlist); | 113 | INIT_LIST_HEAD(&xqm->qm_dqfrlist); |
114 | xqm->qm_dqfrlist_cnt = 0; | 114 | xqm->qm_dqfrlist_cnt = 0; |
115 | mutex_init(&xqm->qm_dqfrlist_lock); | 115 | mutex_init(&xqm->qm_dqfrlist_lock); |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * dquot zone. we register our own low-memory callback. | 118 | * dquot zone. we register our own low-memory callback. |
119 | */ | 119 | */ |
120 | if (!qm_dqzone) { | 120 | if (!qm_dqzone) { |
121 | xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), | 121 | xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), |
122 | "xfs_dquots"); | 122 | "xfs_dquots"); |
123 | qm_dqzone = xqm->qm_dqzone; | 123 | qm_dqzone = xqm->qm_dqzone; |
124 | } else | 124 | } else |
125 | xqm->qm_dqzone = qm_dqzone; | 125 | xqm->qm_dqzone = qm_dqzone; |
126 | 126 | ||
127 | register_shrinker(&xfs_qm_shaker); | 127 | register_shrinker(&xfs_qm_shaker); |
128 | 128 | ||
129 | /* | 129 | /* |
130 | * The t_dqinfo portion of transactions. | 130 | * The t_dqinfo portion of transactions. |
131 | */ | 131 | */ |
132 | if (!qm_dqtrxzone) { | 132 | if (!qm_dqtrxzone) { |
133 | xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), | 133 | xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), |
134 | "xfs_dqtrx"); | 134 | "xfs_dqtrx"); |
135 | qm_dqtrxzone = xqm->qm_dqtrxzone; | 135 | qm_dqtrxzone = xqm->qm_dqtrxzone; |
136 | } else | 136 | } else |
137 | xqm->qm_dqtrxzone = qm_dqtrxzone; | 137 | xqm->qm_dqtrxzone = qm_dqtrxzone; |
138 | 138 | ||
139 | atomic_set(&xqm->qm_totaldquots, 0); | 139 | atomic_set(&xqm->qm_totaldquots, 0); |
140 | xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; | 140 | xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; |
141 | xqm->qm_nrefs = 0; | 141 | xqm->qm_nrefs = 0; |
142 | return xqm; | 142 | return xqm; |
143 | 143 | ||
144 | out_free_udqhash: | 144 | out_free_udqhash: |
145 | kmem_free_large(udqhash); | 145 | kmem_free_large(udqhash); |
146 | out: | 146 | out: |
147 | return NULL; | 147 | return NULL; |
148 | } | 148 | } |
149 | 149 | ||
150 | /* | 150 | /* |
151 | * Destroy the global quota manager when its reference count goes to zero. | 151 | * Destroy the global quota manager when its reference count goes to zero. |
152 | */ | 152 | */ |
153 | STATIC void | 153 | STATIC void |
154 | xfs_qm_destroy( | 154 | xfs_qm_destroy( |
155 | struct xfs_qm *xqm) | 155 | struct xfs_qm *xqm) |
156 | { | 156 | { |
157 | int hsize, i; | 157 | int hsize, i; |
158 | 158 | ||
159 | ASSERT(xqm != NULL); | 159 | ASSERT(xqm != NULL); |
160 | ASSERT(xqm->qm_nrefs == 0); | 160 | ASSERT(xqm->qm_nrefs == 0); |
161 | 161 | ||
162 | unregister_shrinker(&xfs_qm_shaker); | 162 | unregister_shrinker(&xfs_qm_shaker); |
163 | 163 | ||
164 | mutex_lock(&xqm->qm_dqfrlist_lock); | 164 | mutex_lock(&xqm->qm_dqfrlist_lock); |
165 | ASSERT(list_empty(&xqm->qm_dqfrlist)); | 165 | ASSERT(list_empty(&xqm->qm_dqfrlist)); |
166 | mutex_unlock(&xqm->qm_dqfrlist_lock); | 166 | mutex_unlock(&xqm->qm_dqfrlist_lock); |
167 | 167 | ||
168 | hsize = xqm->qm_dqhashmask + 1; | 168 | hsize = xqm->qm_dqhashmask + 1; |
169 | for (i = 0; i < hsize; i++) { | 169 | for (i = 0; i < hsize; i++) { |
170 | xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); | 170 | xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); |
171 | xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); | 171 | xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); |
172 | } | 172 | } |
173 | kmem_free_large(xqm->qm_usr_dqhtable); | 173 | kmem_free_large(xqm->qm_usr_dqhtable); |
174 | kmem_free_large(xqm->qm_grp_dqhtable); | 174 | kmem_free_large(xqm->qm_grp_dqhtable); |
175 | xqm->qm_usr_dqhtable = NULL; | 175 | xqm->qm_usr_dqhtable = NULL; |
176 | xqm->qm_grp_dqhtable = NULL; | 176 | xqm->qm_grp_dqhtable = NULL; |
177 | xqm->qm_dqhashmask = 0; | 177 | xqm->qm_dqhashmask = 0; |
178 | 178 | ||
179 | kmem_free(xqm); | 179 | kmem_free(xqm); |
180 | } | 180 | } |
181 | 181 | ||
182 | /* | 182 | /* |
183 | * Called at mount time to let XQM know that another file system is | 183 | * Called at mount time to let XQM know that another file system is |
184 | * starting quotas. This isn't crucial information as the individual mount | 184 | * starting quotas. This isn't crucial information as the individual mount |
185 | * structures are pretty independent, but it helps the XQM keep a | 185 | * structures are pretty independent, but it helps the XQM keep a |
186 | * global view of what's going on. | 186 | * global view of what's going on. |
187 | */ | 187 | */ |
188 | /* ARGSUSED */ | 188 | /* ARGSUSED */ |
189 | STATIC int | 189 | STATIC int |
190 | xfs_qm_hold_quotafs_ref( | 190 | xfs_qm_hold_quotafs_ref( |
191 | struct xfs_mount *mp) | 191 | struct xfs_mount *mp) |
192 | { | 192 | { |
193 | /* | 193 | /* |
194 | * Need to lock the xfs_Gqm structure for things like this. For example, | 194 | * Need to lock the xfs_Gqm structure for things like this. For example, |
195 | * the structure could disappear between the entry to this routine and | 195 | * the structure could disappear between the entry to this routine and |
196 | * a HOLD operation if not locked. | 196 | * a HOLD operation if not locked. |
197 | */ | 197 | */ |
198 | mutex_lock(&xfs_Gqm_lock); | 198 | mutex_lock(&xfs_Gqm_lock); |
199 | 199 | ||
200 | if (!xfs_Gqm) { | 200 | if (!xfs_Gqm) { |
201 | xfs_Gqm = xfs_Gqm_init(); | 201 | xfs_Gqm = xfs_Gqm_init(); |
202 | if (!xfs_Gqm) { | 202 | if (!xfs_Gqm) { |
203 | mutex_unlock(&xfs_Gqm_lock); | 203 | mutex_unlock(&xfs_Gqm_lock); |
204 | return ENOMEM; | 204 | return ENOMEM; |
205 | } | 205 | } |
206 | } | 206 | } |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * We can keep a list of all filesystems with quotas mounted for | 209 | * We can keep a list of all filesystems with quotas mounted for |
210 | * debugging and statistical purposes, but ... | 210 | * debugging and statistical purposes, but ... |
211 | * Just take a reference and get out. | 211 | * Just take a reference and get out. |
212 | */ | 212 | */ |
213 | xfs_Gqm->qm_nrefs++; | 213 | xfs_Gqm->qm_nrefs++; |
214 | mutex_unlock(&xfs_Gqm_lock); | 214 | mutex_unlock(&xfs_Gqm_lock); |
215 | 215 | ||
216 | return 0; | 216 | return 0; |
217 | } | 217 | } |
218 | 218 | ||
219 | 219 | ||
220 | /* | 220 | /* |
221 | * Release the reference that a filesystem took at mount time, | 221 | * Release the reference that a filesystem took at mount time, |
222 | * so that we know when we need to destroy the entire quota manager. | 222 | * so that we know when we need to destroy the entire quota manager. |
223 | */ | 223 | */ |
224 | /* ARGSUSED */ | 224 | /* ARGSUSED */ |
225 | STATIC void | 225 | STATIC void |
226 | xfs_qm_rele_quotafs_ref( | 226 | xfs_qm_rele_quotafs_ref( |
227 | struct xfs_mount *mp) | 227 | struct xfs_mount *mp) |
228 | { | 228 | { |
229 | ASSERT(xfs_Gqm); | 229 | ASSERT(xfs_Gqm); |
230 | ASSERT(xfs_Gqm->qm_nrefs > 0); | 230 | ASSERT(xfs_Gqm->qm_nrefs > 0); |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * Destroy the entire XQM. If somebody mounts with quotaon, this'll | 233 | * Destroy the entire XQM. If somebody mounts with quotaon, this'll |
234 | * be restarted. | 234 | * be restarted. |
235 | */ | 235 | */ |
236 | mutex_lock(&xfs_Gqm_lock); | 236 | mutex_lock(&xfs_Gqm_lock); |
237 | if (--xfs_Gqm->qm_nrefs == 0) { | 237 | if (--xfs_Gqm->qm_nrefs == 0) { |
238 | xfs_qm_destroy(xfs_Gqm); | 238 | xfs_qm_destroy(xfs_Gqm); |
239 | xfs_Gqm = NULL; | 239 | xfs_Gqm = NULL; |
240 | } | 240 | } |
241 | mutex_unlock(&xfs_Gqm_lock); | 241 | mutex_unlock(&xfs_Gqm_lock); |
242 | } | 242 | } |
243 | 243 | ||
244 | /* | 244 | /* |
245 | * Just destroy the quotainfo structure. | 245 | * Just destroy the quotainfo structure. |
246 | */ | 246 | */ |
247 | void | 247 | void |
248 | xfs_qm_unmount( | 248 | xfs_qm_unmount( |
249 | struct xfs_mount *mp) | 249 | struct xfs_mount *mp) |
250 | { | 250 | { |
251 | if (mp->m_quotainfo) { | 251 | if (mp->m_quotainfo) { |
252 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); | 252 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); |
253 | xfs_qm_destroy_quotainfo(mp); | 253 | xfs_qm_destroy_quotainfo(mp); |
254 | } | 254 | } |
255 | } | 255 | } |
256 | 256 | ||
257 | 257 | ||
258 | /* | 258 | /* |
259 | * This is called from xfs_mountfs to start quotas and initialize all | 259 | * This is called from xfs_mountfs to start quotas and initialize all |
260 | * necessary data structures like quotainfo. This is also responsible for | 260 | * necessary data structures like quotainfo. This is also responsible for |
261 | * running a quotacheck as necessary. We are guaranteed that the superblock | 261 | * running a quotacheck as necessary. We are guaranteed that the superblock |
262 | * is consistently read in at this point. | 262 | * is consistently read in at this point. |
263 | * | 263 | * |
264 | * If we fail here, the mount will continue with quota turned off. We don't | 264 | * If we fail here, the mount will continue with quota turned off. We don't |
265 | * need to inidicate success or failure at all. | 265 | * need to inidicate success or failure at all. |
266 | */ | 266 | */ |
267 | void | 267 | void |
268 | xfs_qm_mount_quotas( | 268 | xfs_qm_mount_quotas( |
269 | xfs_mount_t *mp) | 269 | xfs_mount_t *mp) |
270 | { | 270 | { |
271 | int error = 0; | 271 | int error = 0; |
272 | uint sbf; | 272 | uint sbf; |
273 | 273 | ||
274 | /* | 274 | /* |
275 | * If quotas on realtime volumes is not supported, we disable | 275 | * If quotas on realtime volumes is not supported, we disable |
276 | * quotas immediately. | 276 | * quotas immediately. |
277 | */ | 277 | */ |
278 | if (mp->m_sb.sb_rextents) { | 278 | if (mp->m_sb.sb_rextents) { |
279 | xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); | 279 | xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); |
280 | mp->m_qflags = 0; | 280 | mp->m_qflags = 0; |
281 | goto write_changes; | 281 | goto write_changes; |
282 | } | 282 | } |
283 | 283 | ||
284 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 284 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * Allocate the quotainfo structure inside the mount struct, and | 287 | * Allocate the quotainfo structure inside the mount struct, and |
288 | * create quotainode(s), and change/rev superblock if necessary. | 288 | * create quotainode(s), and change/rev superblock if necessary. |
289 | */ | 289 | */ |
290 | error = xfs_qm_init_quotainfo(mp); | 290 | error = xfs_qm_init_quotainfo(mp); |
291 | if (error) { | 291 | if (error) { |
292 | /* | 292 | /* |
293 | * We must turn off quotas. | 293 | * We must turn off quotas. |
294 | */ | 294 | */ |
295 | ASSERT(mp->m_quotainfo == NULL); | 295 | ASSERT(mp->m_quotainfo == NULL); |
296 | mp->m_qflags = 0; | 296 | mp->m_qflags = 0; |
297 | goto write_changes; | 297 | goto write_changes; |
298 | } | 298 | } |
299 | /* | 299 | /* |
300 | * If any of the quotas are not consistent, do a quotacheck. | 300 | * If any of the quotas are not consistent, do a quotacheck. |
301 | */ | 301 | */ |
302 | if (XFS_QM_NEED_QUOTACHECK(mp)) { | 302 | if (XFS_QM_NEED_QUOTACHECK(mp)) { |
303 | error = xfs_qm_quotacheck(mp); | 303 | error = xfs_qm_quotacheck(mp); |
304 | if (error) { | 304 | if (error) { |
305 | /* Quotacheck failed and disabled quotas. */ | 305 | /* Quotacheck failed and disabled quotas. */ |
306 | return; | 306 | return; |
307 | } | 307 | } |
308 | } | 308 | } |
309 | /* | 309 | /* |
310 | * If one type of quotas is off, then it will lose its | 310 | * If one type of quotas is off, then it will lose its |
311 | * quotachecked status, since we won't be doing accounting for | 311 | * quotachecked status, since we won't be doing accounting for |
312 | * that type anymore. | 312 | * that type anymore. |
313 | */ | 313 | */ |
314 | if (!XFS_IS_UQUOTA_ON(mp)) | 314 | if (!XFS_IS_UQUOTA_ON(mp)) |
315 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; | 315 | mp->m_qflags &= ~XFS_UQUOTA_CHKD; |
316 | if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) | 316 | if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) |
317 | mp->m_qflags &= ~XFS_OQUOTA_CHKD; | 317 | mp->m_qflags &= ~XFS_OQUOTA_CHKD; |
318 | 318 | ||
319 | write_changes: | 319 | write_changes: |
320 | /* | 320 | /* |
321 | * We actually don't have to acquire the m_sb_lock at all. | 321 | * We actually don't have to acquire the m_sb_lock at all. |
322 | * This can only be called from mount, and that's single threaded. XXX | 322 | * This can only be called from mount, and that's single threaded. XXX |
323 | */ | 323 | */ |
324 | spin_lock(&mp->m_sb_lock); | 324 | spin_lock(&mp->m_sb_lock); |
325 | sbf = mp->m_sb.sb_qflags; | 325 | sbf = mp->m_sb.sb_qflags; |
326 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; | 326 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; |
327 | spin_unlock(&mp->m_sb_lock); | 327 | spin_unlock(&mp->m_sb_lock); |
328 | 328 | ||
329 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { | 329 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { |
330 | if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { | 330 | if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { |
331 | /* | 331 | /* |
332 | * We could only have been turning quotas off. | 332 | * We could only have been turning quotas off. |
333 | * We aren't in very good shape actually because | 333 | * We aren't in very good shape actually because |
334 | * the incore structures are convinced that quotas are | 334 | * the incore structures are convinced that quotas are |
335 | * off, but the on disk superblock doesn't know that ! | 335 | * off, but the on disk superblock doesn't know that ! |
336 | */ | 336 | */ |
337 | ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); | 337 | ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); |
338 | xfs_alert(mp, "%s: Superblock update failed!", | 338 | xfs_alert(mp, "%s: Superblock update failed!", |
339 | __func__); | 339 | __func__); |
340 | } | 340 | } |
341 | } | 341 | } |
342 | 342 | ||
343 | if (error) { | 343 | if (error) { |
344 | xfs_warn(mp, "Failed to initialize disk quotas."); | 344 | xfs_warn(mp, "Failed to initialize disk quotas."); |
345 | return; | 345 | return; |
346 | } | 346 | } |
347 | } | 347 | } |
348 | 348 | ||
349 | /* | 349 | /* |
350 | * Called from the vfsops layer. | 350 | * Called from the vfsops layer. |
351 | */ | 351 | */ |
352 | void | 352 | void |
353 | xfs_qm_unmount_quotas( | 353 | xfs_qm_unmount_quotas( |
354 | xfs_mount_t *mp) | 354 | xfs_mount_t *mp) |
355 | { | 355 | { |
356 | /* | 356 | /* |
357 | * Release the dquots that root inode, et al might be holding, | 357 | * Release the dquots that root inode, et al might be holding, |
358 | * before we flush quotas and blow away the quotainfo structure. | 358 | * before we flush quotas and blow away the quotainfo structure. |
359 | */ | 359 | */ |
360 | ASSERT(mp->m_rootip); | 360 | ASSERT(mp->m_rootip); |
361 | xfs_qm_dqdetach(mp->m_rootip); | 361 | xfs_qm_dqdetach(mp->m_rootip); |
362 | if (mp->m_rbmip) | 362 | if (mp->m_rbmip) |
363 | xfs_qm_dqdetach(mp->m_rbmip); | 363 | xfs_qm_dqdetach(mp->m_rbmip); |
364 | if (mp->m_rsumip) | 364 | if (mp->m_rsumip) |
365 | xfs_qm_dqdetach(mp->m_rsumip); | 365 | xfs_qm_dqdetach(mp->m_rsumip); |
366 | 366 | ||
367 | /* | 367 | /* |
368 | * Release the quota inodes. | 368 | * Release the quota inodes. |
369 | */ | 369 | */ |
370 | if (mp->m_quotainfo) { | 370 | if (mp->m_quotainfo) { |
371 | if (mp->m_quotainfo->qi_uquotaip) { | 371 | if (mp->m_quotainfo->qi_uquotaip) { |
372 | IRELE(mp->m_quotainfo->qi_uquotaip); | 372 | IRELE(mp->m_quotainfo->qi_uquotaip); |
373 | mp->m_quotainfo->qi_uquotaip = NULL; | 373 | mp->m_quotainfo->qi_uquotaip = NULL; |
374 | } | 374 | } |
375 | if (mp->m_quotainfo->qi_gquotaip) { | 375 | if (mp->m_quotainfo->qi_gquotaip) { |
376 | IRELE(mp->m_quotainfo->qi_gquotaip); | 376 | IRELE(mp->m_quotainfo->qi_gquotaip); |
377 | mp->m_quotainfo->qi_gquotaip = NULL; | 377 | mp->m_quotainfo->qi_gquotaip = NULL; |
378 | } | 378 | } |
379 | } | 379 | } |
380 | } | 380 | } |
381 | 381 | ||
382 | /* | 382 | /* |
383 | * Flush all dquots of the given file system to disk. The dquots are | 383 | * Flush all dquots of the given file system to disk. The dquots are |
384 | * _not_ purged from memory here, just their data written to disk. | 384 | * _not_ purged from memory here, just their data written to disk. |
385 | */ | 385 | */ |
386 | STATIC int | 386 | STATIC int |
387 | xfs_qm_dqflush_all( | 387 | xfs_qm_dqflush_all( |
388 | struct xfs_mount *mp) | 388 | struct xfs_mount *mp) |
389 | { | 389 | { |
390 | struct xfs_quotainfo *q = mp->m_quotainfo; | 390 | struct xfs_quotainfo *q = mp->m_quotainfo; |
391 | int recl; | 391 | int recl; |
392 | struct xfs_dquot *dqp; | 392 | struct xfs_dquot *dqp; |
393 | int error; | 393 | int error; |
394 | 394 | ||
395 | if (!q) | 395 | if (!q) |
396 | return 0; | 396 | return 0; |
397 | again: | 397 | again: |
398 | mutex_lock(&q->qi_dqlist_lock); | 398 | mutex_lock(&q->qi_dqlist_lock); |
399 | list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { | 399 | list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { |
400 | xfs_dqlock(dqp); | 400 | xfs_dqlock(dqp); |
401 | if ((dqp->dq_flags & XFS_DQ_FREEING) || | 401 | if ((dqp->dq_flags & XFS_DQ_FREEING) || |
402 | !XFS_DQ_IS_DIRTY(dqp)) { | 402 | !XFS_DQ_IS_DIRTY(dqp)) { |
403 | xfs_dqunlock(dqp); | 403 | xfs_dqunlock(dqp); |
404 | continue; | 404 | continue; |
405 | } | 405 | } |
406 | 406 | ||
407 | /* XXX a sentinel would be better */ | 407 | /* XXX a sentinel would be better */ |
408 | recl = q->qi_dqreclaims; | 408 | recl = q->qi_dqreclaims; |
409 | if (!xfs_dqflock_nowait(dqp)) { | 409 | if (!xfs_dqflock_nowait(dqp)) { |
410 | /* | 410 | /* |
411 | * If we can't grab the flush lock then check | 411 | * If we can't grab the flush lock then check |
412 | * to see if the dquot has been flushed delayed | 412 | * to see if the dquot has been flushed delayed |
413 | * write. If so, grab its buffer and send it | 413 | * write. If so, grab its buffer and send it |
414 | * out immediately. We'll be able to acquire | 414 | * out immediately. We'll be able to acquire |
415 | * the flush lock when the I/O completes. | 415 | * the flush lock when the I/O completes. |
416 | */ | 416 | */ |
417 | xfs_dqflock_pushbuf_wait(dqp); | 417 | xfs_dqflock_pushbuf_wait(dqp); |
418 | } | 418 | } |
419 | /* | 419 | /* |
420 | * Let go of the mplist lock. We don't want to hold it | 420 | * Let go of the mplist lock. We don't want to hold it |
421 | * across a disk write. | 421 | * across a disk write. |
422 | */ | 422 | */ |
423 | mutex_unlock(&q->qi_dqlist_lock); | 423 | mutex_unlock(&q->qi_dqlist_lock); |
424 | error = xfs_qm_dqflush(dqp, 0); | 424 | error = xfs_qm_dqflush(dqp, 0); |
425 | xfs_dqunlock(dqp); | 425 | xfs_dqunlock(dqp); |
426 | if (error) | 426 | if (error) |
427 | return error; | 427 | return error; |
428 | 428 | ||
429 | mutex_lock(&q->qi_dqlist_lock); | 429 | mutex_lock(&q->qi_dqlist_lock); |
430 | if (recl != q->qi_dqreclaims) { | 430 | if (recl != q->qi_dqreclaims) { |
431 | mutex_unlock(&q->qi_dqlist_lock); | 431 | mutex_unlock(&q->qi_dqlist_lock); |
432 | /* XXX restart limit */ | 432 | /* XXX restart limit */ |
433 | goto again; | 433 | goto again; |
434 | } | 434 | } |
435 | } | 435 | } |
436 | 436 | ||
437 | mutex_unlock(&q->qi_dqlist_lock); | 437 | mutex_unlock(&q->qi_dqlist_lock); |
438 | /* return ! busy */ | 438 | /* return ! busy */ |
439 | return 0; | 439 | return 0; |
440 | } | 440 | } |
441 | 441 | ||
442 | /* | 442 | /* |
443 | * Release the group dquot pointers the user dquots may be | 443 | * Release the group dquot pointers the user dquots may be |
444 | * carrying around as a hint. mplist is locked on entry and exit. | 444 | * carrying around as a hint. mplist is locked on entry and exit. |
445 | */ | 445 | */ |
446 | STATIC void | 446 | STATIC void |
447 | xfs_qm_detach_gdquots( | 447 | xfs_qm_detach_gdquots( |
448 | struct xfs_mount *mp) | 448 | struct xfs_mount *mp) |
449 | { | 449 | { |
450 | struct xfs_quotainfo *q = mp->m_quotainfo; | 450 | struct xfs_quotainfo *q = mp->m_quotainfo; |
451 | struct xfs_dquot *dqp, *gdqp; | 451 | struct xfs_dquot *dqp, *gdqp; |
452 | int nrecl; | 452 | int nrecl; |
453 | 453 | ||
454 | again: | 454 | again: |
455 | ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); | 455 | ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); |
456 | list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { | 456 | list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { |
457 | xfs_dqlock(dqp); | 457 | xfs_dqlock(dqp); |
458 | if (dqp->dq_flags & XFS_DQ_FREEING) { | 458 | if (dqp->dq_flags & XFS_DQ_FREEING) { |
459 | xfs_dqunlock(dqp); | 459 | xfs_dqunlock(dqp); |
460 | mutex_unlock(&q->qi_dqlist_lock); | 460 | mutex_unlock(&q->qi_dqlist_lock); |
461 | delay(1); | 461 | delay(1); |
462 | mutex_lock(&q->qi_dqlist_lock); | 462 | mutex_lock(&q->qi_dqlist_lock); |
463 | goto again; | 463 | goto again; |
464 | } | 464 | } |
465 | if ((gdqp = dqp->q_gdquot)) { | 465 | if ((gdqp = dqp->q_gdquot)) { |
466 | xfs_dqlock(gdqp); | 466 | xfs_dqlock(gdqp); |
467 | dqp->q_gdquot = NULL; | 467 | dqp->q_gdquot = NULL; |
468 | } | 468 | } |
469 | xfs_dqunlock(dqp); | 469 | xfs_dqunlock(dqp); |
470 | 470 | ||
471 | if (gdqp) { | 471 | if (gdqp) { |
472 | /* | 472 | /* |
473 | * Can't hold the mplist lock across a dqput. | 473 | * Can't hold the mplist lock across a dqput. |
474 | * XXXmust convert to marker based iterations here. | 474 | * XXXmust convert to marker based iterations here. |
475 | */ | 475 | */ |
476 | nrecl = q->qi_dqreclaims; | 476 | nrecl = q->qi_dqreclaims; |
477 | mutex_unlock(&q->qi_dqlist_lock); | 477 | mutex_unlock(&q->qi_dqlist_lock); |
478 | xfs_qm_dqput(gdqp); | 478 | xfs_qm_dqput(gdqp); |
479 | 479 | ||
480 | mutex_lock(&q->qi_dqlist_lock); | 480 | mutex_lock(&q->qi_dqlist_lock); |
481 | if (nrecl != q->qi_dqreclaims) | 481 | if (nrecl != q->qi_dqreclaims) |
482 | goto again; | 482 | goto again; |
483 | } | 483 | } |
484 | } | 484 | } |
485 | } | 485 | } |
486 | 486 | ||
487 | /* | 487 | /* |
488 | * Go through all the incore dquots of this file system and take them | 488 | * Go through all the incore dquots of this file system and take them |
489 | * off the mplist and hashlist, if the dquot type matches the dqtype | 489 | * off the mplist and hashlist, if the dquot type matches the dqtype |
490 | * parameter. This is used when turning off quota accounting for | 490 | * parameter. This is used when turning off quota accounting for |
491 | * users and/or groups, as well as when the filesystem is unmounting. | 491 | * users and/or groups, as well as when the filesystem is unmounting. |
492 | */ | 492 | */ |
493 | STATIC int | 493 | STATIC int |
494 | xfs_qm_dqpurge_int( | 494 | xfs_qm_dqpurge_int( |
495 | struct xfs_mount *mp, | 495 | struct xfs_mount *mp, |
496 | uint flags) | 496 | uint flags) |
497 | { | 497 | { |
498 | struct xfs_quotainfo *q = mp->m_quotainfo; | 498 | struct xfs_quotainfo *q = mp->m_quotainfo; |
499 | struct xfs_dquot *dqp, *n; | 499 | struct xfs_dquot *dqp, *n; |
500 | uint dqtype; | 500 | uint dqtype; |
501 | int nmisses = 0; | 501 | int nmisses = 0; |
502 | LIST_HEAD (dispose_list); | 502 | LIST_HEAD (dispose_list); |
503 | 503 | ||
504 | if (!q) | 504 | if (!q) |
505 | return 0; | 505 | return 0; |
506 | 506 | ||
507 | dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; | 507 | dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; |
508 | dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; | 508 | dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; |
509 | dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; | 509 | dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; |
510 | 510 | ||
511 | mutex_lock(&q->qi_dqlist_lock); | 511 | mutex_lock(&q->qi_dqlist_lock); |
512 | 512 | ||
513 | /* | 513 | /* |
514 | * In the first pass through all incore dquots of this filesystem, | 514 | * In the first pass through all incore dquots of this filesystem, |
515 | * we release the group dquot pointers the user dquots may be | 515 | * we release the group dquot pointers the user dquots may be |
516 | * carrying around as a hint. We need to do this irrespective of | 516 | * carrying around as a hint. We need to do this irrespective of |
517 | * what's being turned off. | 517 | * what's being turned off. |
518 | */ | 518 | */ |
519 | xfs_qm_detach_gdquots(mp); | 519 | xfs_qm_detach_gdquots(mp); |
520 | 520 | ||
521 | /* | 521 | /* |
522 | * Try to get rid of all of the unwanted dquots. | 522 | * Try to get rid of all of the unwanted dquots. |
523 | */ | 523 | */ |
524 | list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) { | 524 | list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) { |
525 | xfs_dqlock(dqp); | 525 | xfs_dqlock(dqp); |
526 | if ((dqp->dq_flags & dqtype) != 0 && | 526 | if ((dqp->dq_flags & dqtype) != 0 && |
527 | !(dqp->dq_flags & XFS_DQ_FREEING)) { | 527 | !(dqp->dq_flags & XFS_DQ_FREEING)) { |
528 | if (dqp->q_nrefs == 0) { | 528 | if (dqp->q_nrefs == 0) { |
529 | dqp->dq_flags |= XFS_DQ_FREEING; | 529 | dqp->dq_flags |= XFS_DQ_FREEING; |
530 | list_move_tail(&dqp->q_mplist, &dispose_list); | 530 | list_move_tail(&dqp->q_mplist, &dispose_list); |
531 | } else | 531 | } else |
532 | nmisses++; | 532 | nmisses++; |
533 | } | 533 | } |
534 | xfs_dqunlock(dqp); | 534 | xfs_dqunlock(dqp); |
535 | } | 535 | } |
536 | mutex_unlock(&q->qi_dqlist_lock); | 536 | mutex_unlock(&q->qi_dqlist_lock); |
537 | 537 | ||
538 | list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist) | 538 | list_for_each_entry_safe(dqp, n, &dispose_list, q_mplist) |
539 | xfs_qm_dqpurge(dqp); | 539 | xfs_qm_dqpurge(dqp); |
540 | 540 | ||
541 | return nmisses; | 541 | return nmisses; |
542 | } | 542 | } |
543 | 543 | ||
544 | int | 544 | int |
545 | xfs_qm_dqpurge_all( | 545 | xfs_qm_dqpurge_all( |
546 | xfs_mount_t *mp, | 546 | xfs_mount_t *mp, |
547 | uint flags) | 547 | uint flags) |
548 | { | 548 | { |
549 | int ndquots; | 549 | int ndquots; |
550 | 550 | ||
551 | /* | 551 | /* |
552 | * Purge the dquot cache. | 552 | * Purge the dquot cache. |
553 | * None of the dquots should really be busy at this point. | 553 | * None of the dquots should really be busy at this point. |
554 | */ | 554 | */ |
555 | if (mp->m_quotainfo) { | 555 | if (mp->m_quotainfo) { |
556 | while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { | 556 | while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { |
557 | delay(ndquots * 10); | 557 | delay(ndquots * 10); |
558 | } | 558 | } |
559 | } | 559 | } |
560 | return 0; | 560 | return 0; |
561 | } | 561 | } |
562 | 562 | ||
563 | STATIC int | 563 | STATIC int |
564 | xfs_qm_dqattach_one( | 564 | xfs_qm_dqattach_one( |
565 | xfs_inode_t *ip, | 565 | xfs_inode_t *ip, |
566 | xfs_dqid_t id, | 566 | xfs_dqid_t id, |
567 | uint type, | 567 | uint type, |
568 | uint doalloc, | 568 | uint doalloc, |
569 | xfs_dquot_t *udqhint, /* hint */ | 569 | xfs_dquot_t *udqhint, /* hint */ |
570 | xfs_dquot_t **IO_idqpp) | 570 | xfs_dquot_t **IO_idqpp) |
571 | { | 571 | { |
572 | xfs_dquot_t *dqp; | 572 | xfs_dquot_t *dqp; |
573 | int error; | 573 | int error; |
574 | 574 | ||
575 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 575 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
576 | error = 0; | 576 | error = 0; |
577 | 577 | ||
578 | /* | 578 | /* |
579 | * See if we already have it in the inode itself. IO_idqpp is | 579 | * See if we already have it in the inode itself. IO_idqpp is |
580 | * &i_udquot or &i_gdquot. This made the code look weird, but | 580 | * &i_udquot or &i_gdquot. This made the code look weird, but |
581 | * made the logic a lot simpler. | 581 | * made the logic a lot simpler. |
582 | */ | 582 | */ |
583 | dqp = *IO_idqpp; | 583 | dqp = *IO_idqpp; |
584 | if (dqp) { | 584 | if (dqp) { |
585 | trace_xfs_dqattach_found(dqp); | 585 | trace_xfs_dqattach_found(dqp); |
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | 589 | /* |
590 | * udqhint is the i_udquot field in inode, and is non-NULL only | 590 | * udqhint is the i_udquot field in inode, and is non-NULL only |
591 | * when the type arg is group/project. Its purpose is to save a | 591 | * when the type arg is group/project. Its purpose is to save a |
592 | * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside | 592 | * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside |
593 | * the user dquot. | 593 | * the user dquot. |
594 | */ | 594 | */ |
595 | if (udqhint) { | 595 | if (udqhint) { |
596 | ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ); | 596 | ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ); |
597 | xfs_dqlock(udqhint); | 597 | xfs_dqlock(udqhint); |
598 | 598 | ||
599 | /* | 599 | /* |
600 | * No need to take dqlock to look at the id. | 600 | * No need to take dqlock to look at the id. |
601 | * | 601 | * |
602 | * The ID can't change until it gets reclaimed, and it won't | 602 | * The ID can't change until it gets reclaimed, and it won't |
603 | * be reclaimed as long as we have a ref from inode and we | 603 | * be reclaimed as long as we have a ref from inode and we |
604 | * hold the ilock. | 604 | * hold the ilock. |
605 | */ | 605 | */ |
606 | dqp = udqhint->q_gdquot; | 606 | dqp = udqhint->q_gdquot; |
607 | if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) { | 607 | if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) { |
608 | ASSERT(*IO_idqpp == NULL); | 608 | ASSERT(*IO_idqpp == NULL); |
609 | 609 | ||
610 | *IO_idqpp = xfs_qm_dqhold(dqp); | 610 | *IO_idqpp = xfs_qm_dqhold(dqp); |
611 | xfs_dqunlock(udqhint); | 611 | xfs_dqunlock(udqhint); |
612 | return 0; | 612 | return 0; |
613 | } | 613 | } |
614 | 614 | ||
615 | /* | 615 | /* |
616 | * We can't hold a dquot lock when we call the dqget code. | 616 | * We can't hold a dquot lock when we call the dqget code. |
617 | * We'll deadlock in no time, because of (not conforming to) | 617 | * We'll deadlock in no time, because of (not conforming to) |
618 | * lock ordering - the inodelock comes before any dquot lock, | 618 | * lock ordering - the inodelock comes before any dquot lock, |
619 | * and we may drop and reacquire the ilock in xfs_qm_dqget(). | 619 | * and we may drop and reacquire the ilock in xfs_qm_dqget(). |
620 | */ | 620 | */ |
621 | xfs_dqunlock(udqhint); | 621 | xfs_dqunlock(udqhint); |
622 | } | 622 | } |
623 | 623 | ||
624 | /* | 624 | /* |
625 | * Find the dquot from somewhere. This bumps the | 625 | * Find the dquot from somewhere. This bumps the |
626 | * reference count of dquot and returns it locked. | 626 | * reference count of dquot and returns it locked. |
627 | * This can return ENOENT if dquot didn't exist on | 627 | * This can return ENOENT if dquot didn't exist on |
628 | * disk and we didn't ask it to allocate; | 628 | * disk and we didn't ask it to allocate; |
629 | * ESRCH if quotas got turned off suddenly. | 629 | * ESRCH if quotas got turned off suddenly. |
630 | */ | 630 | */ |
631 | error = xfs_qm_dqget(ip->i_mount, ip, id, type, | 631 | error = xfs_qm_dqget(ip->i_mount, ip, id, type, |
632 | doalloc | XFS_QMOPT_DOWARN, &dqp); | 632 | doalloc | XFS_QMOPT_DOWARN, &dqp); |
633 | if (error) | 633 | if (error) |
634 | return error; | 634 | return error; |
635 | 635 | ||
636 | trace_xfs_dqattach_get(dqp); | 636 | trace_xfs_dqattach_get(dqp); |
637 | 637 | ||
638 | /* | 638 | /* |
639 | * dqget may have dropped and re-acquired the ilock, but it guarantees | 639 | * dqget may have dropped and re-acquired the ilock, but it guarantees |
640 | * that the dquot returned is the one that should go in the inode. | 640 | * that the dquot returned is the one that should go in the inode. |
641 | */ | 641 | */ |
642 | *IO_idqpp = dqp; | 642 | *IO_idqpp = dqp; |
643 | xfs_dqunlock(dqp); | 643 | xfs_dqunlock(dqp); |
644 | return 0; | 644 | return 0; |
645 | } | 645 | } |
646 | 646 | ||
647 | 647 | ||
648 | /* | 648 | /* |
649 | * Given a udquot and gdquot, attach a ptr to the group dquot in the | 649 | * Given a udquot and gdquot, attach a ptr to the group dquot in the |
650 | * udquot as a hint for future lookups. | 650 | * udquot as a hint for future lookups. |
651 | */ | 651 | */ |
652 | STATIC void | 652 | STATIC void |
653 | xfs_qm_dqattach_grouphint( | 653 | xfs_qm_dqattach_grouphint( |
654 | xfs_dquot_t *udq, | 654 | xfs_dquot_t *udq, |
655 | xfs_dquot_t *gdq) | 655 | xfs_dquot_t *gdq) |
656 | { | 656 | { |
657 | xfs_dquot_t *tmp; | 657 | xfs_dquot_t *tmp; |
658 | 658 | ||
659 | xfs_dqlock(udq); | 659 | xfs_dqlock(udq); |
660 | 660 | ||
661 | tmp = udq->q_gdquot; | 661 | tmp = udq->q_gdquot; |
662 | if (tmp) { | 662 | if (tmp) { |
663 | if (tmp == gdq) | 663 | if (tmp == gdq) |
664 | goto done; | 664 | goto done; |
665 | 665 | ||
666 | udq->q_gdquot = NULL; | 666 | udq->q_gdquot = NULL; |
667 | xfs_qm_dqrele(tmp); | 667 | xfs_qm_dqrele(tmp); |
668 | } | 668 | } |
669 | 669 | ||
670 | udq->q_gdquot = xfs_qm_dqhold(gdq); | 670 | udq->q_gdquot = xfs_qm_dqhold(gdq); |
671 | done: | 671 | done: |
672 | xfs_dqunlock(udq); | 672 | xfs_dqunlock(udq); |
673 | } | 673 | } |
674 | 674 | ||
675 | 675 | ||
676 | /* | 676 | /* |
677 | * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON | 677 | * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON |
678 | * into account. | 678 | * into account. |
679 | * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. | 679 | * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. |
680 | * Inode may get unlocked and relocked in here, and the caller must deal with | 680 | * Inode may get unlocked and relocked in here, and the caller must deal with |
681 | * the consequences. | 681 | * the consequences. |
682 | */ | 682 | */ |
683 | int | 683 | int |
684 | xfs_qm_dqattach_locked( | 684 | xfs_qm_dqattach_locked( |
685 | xfs_inode_t *ip, | 685 | xfs_inode_t *ip, |
686 | uint flags) | 686 | uint flags) |
687 | { | 687 | { |
688 | xfs_mount_t *mp = ip->i_mount; | 688 | xfs_mount_t *mp = ip->i_mount; |
689 | uint nquotas = 0; | 689 | uint nquotas = 0; |
690 | int error = 0; | 690 | int error = 0; |
691 | 691 | ||
692 | if (!XFS_IS_QUOTA_RUNNING(mp) || | 692 | if (!XFS_IS_QUOTA_RUNNING(mp) || |
693 | !XFS_IS_QUOTA_ON(mp) || | 693 | !XFS_IS_QUOTA_ON(mp) || |
694 | !XFS_NOT_DQATTACHED(mp, ip) || | 694 | !XFS_NOT_DQATTACHED(mp, ip) || |
695 | ip->i_ino == mp->m_sb.sb_uquotino || | 695 | ip->i_ino == mp->m_sb.sb_uquotino || |
696 | ip->i_ino == mp->m_sb.sb_gquotino) | 696 | ip->i_ino == mp->m_sb.sb_gquotino) |
697 | return 0; | 697 | return 0; |
698 | 698 | ||
699 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 699 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
700 | 700 | ||
701 | if (XFS_IS_UQUOTA_ON(mp)) { | 701 | if (XFS_IS_UQUOTA_ON(mp)) { |
702 | error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, | 702 | error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, |
703 | flags & XFS_QMOPT_DQALLOC, | 703 | flags & XFS_QMOPT_DQALLOC, |
704 | NULL, &ip->i_udquot); | 704 | NULL, &ip->i_udquot); |
705 | if (error) | 705 | if (error) |
706 | goto done; | 706 | goto done; |
707 | nquotas++; | 707 | nquotas++; |
708 | } | 708 | } |
709 | 709 | ||
710 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 710 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
711 | if (XFS_IS_OQUOTA_ON(mp)) { | 711 | if (XFS_IS_OQUOTA_ON(mp)) { |
712 | error = XFS_IS_GQUOTA_ON(mp) ? | 712 | error = XFS_IS_GQUOTA_ON(mp) ? |
713 | xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, | 713 | xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, |
714 | flags & XFS_QMOPT_DQALLOC, | 714 | flags & XFS_QMOPT_DQALLOC, |
715 | ip->i_udquot, &ip->i_gdquot) : | 715 | ip->i_udquot, &ip->i_gdquot) : |
716 | xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, | 716 | xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, |
717 | flags & XFS_QMOPT_DQALLOC, | 717 | flags & XFS_QMOPT_DQALLOC, |
718 | ip->i_udquot, &ip->i_gdquot); | 718 | ip->i_udquot, &ip->i_gdquot); |
719 | /* | 719 | /* |
720 | * Don't worry about the udquot that we may have | 720 | * Don't worry about the udquot that we may have |
721 | * attached above. It'll get detached, if not already. | 721 | * attached above. It'll get detached, if not already. |
722 | */ | 722 | */ |
723 | if (error) | 723 | if (error) |
724 | goto done; | 724 | goto done; |
725 | nquotas++; | 725 | nquotas++; |
726 | } | 726 | } |
727 | 727 | ||
728 | /* | 728 | /* |
729 | * Attach this group quota to the user quota as a hint. | 729 | * Attach this group quota to the user quota as a hint. |
730 | * This WON'T, in general, result in a thrash. | 730 | * This WON'T, in general, result in a thrash. |
731 | */ | 731 | */ |
732 | if (nquotas == 2) { | 732 | if (nquotas == 2) { |
733 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 733 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
734 | ASSERT(ip->i_udquot); | 734 | ASSERT(ip->i_udquot); |
735 | ASSERT(ip->i_gdquot); | 735 | ASSERT(ip->i_gdquot); |
736 | 736 | ||
737 | /* | 737 | /* |
738 | * We do not have i_udquot locked at this point, but this check | 738 | * We do not have i_udquot locked at this point, but this check |
739 | * is OK since we don't depend on the i_gdquot to be accurate | 739 | * is OK since we don't depend on the i_gdquot to be accurate |
740 | * 100% all the time. It is just a hint, and this will | 740 | * 100% all the time. It is just a hint, and this will |
741 | * succeed in general. | 741 | * succeed in general. |
742 | */ | 742 | */ |
743 | if (ip->i_udquot->q_gdquot != ip->i_gdquot) | 743 | if (ip->i_udquot->q_gdquot != ip->i_gdquot) |
744 | xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot); | 744 | xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot); |
745 | } | 745 | } |
746 | 746 | ||
747 | done: | 747 | done: |
748 | #ifdef DEBUG | 748 | #ifdef DEBUG |
749 | if (!error) { | 749 | if (!error) { |
750 | if (XFS_IS_UQUOTA_ON(mp)) | 750 | if (XFS_IS_UQUOTA_ON(mp)) |
751 | ASSERT(ip->i_udquot); | 751 | ASSERT(ip->i_udquot); |
752 | if (XFS_IS_OQUOTA_ON(mp)) | 752 | if (XFS_IS_OQUOTA_ON(mp)) |
753 | ASSERT(ip->i_gdquot); | 753 | ASSERT(ip->i_gdquot); |
754 | } | 754 | } |
755 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 755 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
756 | #endif | 756 | #endif |
757 | return error; | 757 | return error; |
758 | } | 758 | } |
759 | 759 | ||
760 | int | 760 | int |
761 | xfs_qm_dqattach( | 761 | xfs_qm_dqattach( |
762 | struct xfs_inode *ip, | 762 | struct xfs_inode *ip, |
763 | uint flags) | 763 | uint flags) |
764 | { | 764 | { |
765 | int error; | 765 | int error; |
766 | 766 | ||
767 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 767 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
768 | error = xfs_qm_dqattach_locked(ip, flags); | 768 | error = xfs_qm_dqattach_locked(ip, flags); |
769 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 769 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
770 | 770 | ||
771 | return error; | 771 | return error; |
772 | } | 772 | } |
773 | 773 | ||
774 | /* | 774 | /* |
775 | * Release dquots (and their references) if any. | 775 | * Release dquots (and their references) if any. |
776 | * The inode should be locked EXCL except when this's called by | 776 | * The inode should be locked EXCL except when this's called by |
777 | * xfs_ireclaim. | 777 | * xfs_ireclaim. |
778 | */ | 778 | */ |
779 | void | 779 | void |
780 | xfs_qm_dqdetach( | 780 | xfs_qm_dqdetach( |
781 | xfs_inode_t *ip) | 781 | xfs_inode_t *ip) |
782 | { | 782 | { |
783 | if (!(ip->i_udquot || ip->i_gdquot)) | 783 | if (!(ip->i_udquot || ip->i_gdquot)) |
784 | return; | 784 | return; |
785 | 785 | ||
786 | trace_xfs_dquot_dqdetach(ip); | 786 | trace_xfs_dquot_dqdetach(ip); |
787 | 787 | ||
788 | ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); | 788 | ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); |
789 | ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); | 789 | ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); |
790 | if (ip->i_udquot) { | 790 | if (ip->i_udquot) { |
791 | xfs_qm_dqrele(ip->i_udquot); | 791 | xfs_qm_dqrele(ip->i_udquot); |
792 | ip->i_udquot = NULL; | 792 | ip->i_udquot = NULL; |
793 | } | 793 | } |
794 | if (ip->i_gdquot) { | 794 | if (ip->i_gdquot) { |
795 | xfs_qm_dqrele(ip->i_gdquot); | 795 | xfs_qm_dqrele(ip->i_gdquot); |
796 | ip->i_gdquot = NULL; | 796 | ip->i_gdquot = NULL; |
797 | } | 797 | } |
798 | } | 798 | } |
799 | 799 | ||
800 | /* | 800 | /* |
801 | * The hash chains and the mplist use the same xfs_dqhash structure as | 801 | * The hash chains and the mplist use the same xfs_dqhash structure as |
802 | * their list head, but we can take the mplist qh_lock and one of the | 802 | * their list head, but we can take the mplist qh_lock and one of the |
803 | * hash qh_locks at the same time without any problem as they aren't | 803 | * hash qh_locks at the same time without any problem as they aren't |
804 | * related. | 804 | * related. |
805 | */ | 805 | */ |
806 | static struct lock_class_key xfs_quota_mplist_class; | 806 | static struct lock_class_key xfs_quota_mplist_class; |
807 | 807 | ||
808 | /* | 808 | /* |
809 | * This initializes all the quota information that's kept in the | 809 | * This initializes all the quota information that's kept in the |
810 | * mount structure | 810 | * mount structure |
811 | */ | 811 | */ |
812 | STATIC int | 812 | STATIC int |
813 | xfs_qm_init_quotainfo( | 813 | xfs_qm_init_quotainfo( |
814 | xfs_mount_t *mp) | 814 | xfs_mount_t *mp) |
815 | { | 815 | { |
816 | xfs_quotainfo_t *qinf; | 816 | xfs_quotainfo_t *qinf; |
817 | int error; | 817 | int error; |
818 | xfs_dquot_t *dqp; | 818 | xfs_dquot_t *dqp; |
819 | 819 | ||
820 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 820 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
821 | 821 | ||
822 | /* | 822 | /* |
823 | * Tell XQM that we exist as soon as possible. | 823 | * Tell XQM that we exist as soon as possible. |
824 | */ | 824 | */ |
825 | if ((error = xfs_qm_hold_quotafs_ref(mp))) { | 825 | if ((error = xfs_qm_hold_quotafs_ref(mp))) { |
826 | return error; | 826 | return error; |
827 | } | 827 | } |
828 | 828 | ||
829 | qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); | 829 | qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); |
830 | 830 | ||
831 | /* | 831 | /* |
832 | * See if quotainodes are setup, and if not, allocate them, | 832 | * See if quotainodes are setup, and if not, allocate them, |
833 | * and change the superblock accordingly. | 833 | * and change the superblock accordingly. |
834 | */ | 834 | */ |
835 | if ((error = xfs_qm_init_quotainos(mp))) { | 835 | if ((error = xfs_qm_init_quotainos(mp))) { |
836 | kmem_free(qinf); | 836 | kmem_free(qinf); |
837 | mp->m_quotainfo = NULL; | 837 | mp->m_quotainfo = NULL; |
838 | return error; | 838 | return error; |
839 | } | 839 | } |
840 | 840 | ||
841 | INIT_LIST_HEAD(&qinf->qi_dqlist); | 841 | INIT_LIST_HEAD(&qinf->qi_dqlist); |
842 | mutex_init(&qinf->qi_dqlist_lock); | 842 | mutex_init(&qinf->qi_dqlist_lock); |
843 | lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); | 843 | lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); |
844 | 844 | ||
845 | qinf->qi_dqreclaims = 0; | 845 | qinf->qi_dqreclaims = 0; |
846 | 846 | ||
847 | /* mutex used to serialize quotaoffs */ | 847 | /* mutex used to serialize quotaoffs */ |
848 | mutex_init(&qinf->qi_quotaofflock); | 848 | mutex_init(&qinf->qi_quotaofflock); |
849 | 849 | ||
850 | /* Precalc some constants */ | 850 | /* Precalc some constants */ |
851 | qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); | 851 | qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); |
852 | ASSERT(qinf->qi_dqchunklen); | 852 | ASSERT(qinf->qi_dqchunklen); |
853 | qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen); | 853 | qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen); |
854 | do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t)); | 854 | do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t)); |
855 | 855 | ||
856 | mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); | 856 | mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); |
857 | 857 | ||
858 | /* | 858 | /* |
859 | * We try to get the limits from the superuser's limits fields. | 859 | * We try to get the limits from the superuser's limits fields. |
860 | * This is quite hacky, but it is standard quota practice. | 860 | * This is quite hacky, but it is standard quota practice. |
861 | * | ||
861 | * We look at the USR dquot with id == 0 first, but if user quotas | 862 | * We look at the USR dquot with id == 0 first, but if user quotas |
862 | * are not enabled we goto the GRP dquot with id == 0. | 863 | * are not enabled we goto the GRP dquot with id == 0. |
863 | * We don't really care to keep separate default limits for user | 864 | * We don't really care to keep separate default limits for user |
864 | * and group quotas, at least not at this point. | 865 | * and group quotas, at least not at this point. |
866 | * | ||
867 | * Since we may not have done a quotacheck by this point, just read | ||
868 | * the dquot without attaching it to any hashtables or lists. | ||
865 | */ | 869 | */ |
866 | error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0, | 870 | error = xfs_qm_dqread(mp, 0, |
867 | XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : | 871 | XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : |
868 | (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP : | 872 | (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP : |
869 | XFS_DQ_PROJ), | 873 | XFS_DQ_PROJ), |
870 | XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN, | 874 | XFS_QMOPT_DOWARN, &dqp); |
871 | &dqp); | 875 | if (!error) { |
872 | if (! error) { | ||
873 | xfs_disk_dquot_t *ddqp = &dqp->q_core; | 876 | xfs_disk_dquot_t *ddqp = &dqp->q_core; |
874 | 877 | ||
875 | /* | 878 | /* |
876 | * The warnings and timers set the grace period given to | 879 | * The warnings and timers set the grace period given to |
877 | * a user or group before he or she can not perform any | 880 | * a user or group before he or she can not perform any |
878 | * more writing. If it is zero, a default is used. | 881 | * more writing. If it is zero, a default is used. |
879 | */ | 882 | */ |
880 | qinf->qi_btimelimit = ddqp->d_btimer ? | 883 | qinf->qi_btimelimit = ddqp->d_btimer ? |
881 | be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT; | 884 | be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT; |
882 | qinf->qi_itimelimit = ddqp->d_itimer ? | 885 | qinf->qi_itimelimit = ddqp->d_itimer ? |
883 | be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT; | 886 | be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT; |
884 | qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ? | 887 | qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ? |
885 | be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT; | 888 | be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT; |
886 | qinf->qi_bwarnlimit = ddqp->d_bwarns ? | 889 | qinf->qi_bwarnlimit = ddqp->d_bwarns ? |
887 | be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT; | 890 | be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT; |
888 | qinf->qi_iwarnlimit = ddqp->d_iwarns ? | 891 | qinf->qi_iwarnlimit = ddqp->d_iwarns ? |
889 | be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT; | 892 | be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT; |
890 | qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ? | 893 | qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ? |
891 | be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT; | 894 | be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT; |
892 | qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); | 895 | qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); |
893 | qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); | 896 | qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); |
894 | qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); | 897 | qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); |
895 | qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); | 898 | qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); |
896 | qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); | 899 | qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); |
897 | qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); | 900 | qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); |
898 | 901 | ||
899 | /* | ||
900 | * We sent the XFS_QMOPT_DQSUSER flag to dqget because | ||
901 | * we don't want this dquot cached. We haven't done a | ||
902 | * quotacheck yet, and quotacheck doesn't like incore dquots. | ||
903 | */ | ||
904 | xfs_qm_dqdestroy(dqp); | 902 | xfs_qm_dqdestroy(dqp); |
905 | } else { | 903 | } else { |
906 | qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; | 904 | qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; |
907 | qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; | 905 | qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; |
908 | qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; | 906 | qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; |
909 | qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; | 907 | qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; |
910 | qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; | 908 | qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; |
911 | qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; | 909 | qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; |
912 | } | 910 | } |
913 | 911 | ||
914 | return 0; | 912 | return 0; |
915 | } | 913 | } |
916 | 914 | ||
917 | 915 | ||
918 | /* | 916 | /* |
919 | * Gets called when unmounting a filesystem or when all quotas get | 917 | * Gets called when unmounting a filesystem or when all quotas get |
920 | * turned off. | 918 | * turned off. |
921 | * This purges the quota inodes, destroys locks and frees itself. | 919 | * This purges the quota inodes, destroys locks and frees itself. |
922 | */ | 920 | */ |
923 | void | 921 | void |
924 | xfs_qm_destroy_quotainfo( | 922 | xfs_qm_destroy_quotainfo( |
925 | xfs_mount_t *mp) | 923 | xfs_mount_t *mp) |
926 | { | 924 | { |
927 | xfs_quotainfo_t *qi; | 925 | xfs_quotainfo_t *qi; |
928 | 926 | ||
929 | qi = mp->m_quotainfo; | 927 | qi = mp->m_quotainfo; |
930 | ASSERT(qi != NULL); | 928 | ASSERT(qi != NULL); |
931 | ASSERT(xfs_Gqm != NULL); | 929 | ASSERT(xfs_Gqm != NULL); |
932 | 930 | ||
933 | /* | 931 | /* |
934 | * Release the reference that XQM kept, so that we know | 932 | * Release the reference that XQM kept, so that we know |
935 | * when the XQM structure should be freed. We cannot assume | 933 | * when the XQM structure should be freed. We cannot assume |
936 | * that xfs_Gqm is non-null after this point. | 934 | * that xfs_Gqm is non-null after this point. |
937 | */ | 935 | */ |
938 | xfs_qm_rele_quotafs_ref(mp); | 936 | xfs_qm_rele_quotafs_ref(mp); |
939 | 937 | ||
940 | ASSERT(list_empty(&qi->qi_dqlist)); | 938 | ASSERT(list_empty(&qi->qi_dqlist)); |
941 | mutex_destroy(&qi->qi_dqlist_lock); | 939 | mutex_destroy(&qi->qi_dqlist_lock); |
942 | 940 | ||
943 | if (qi->qi_uquotaip) { | 941 | if (qi->qi_uquotaip) { |
944 | IRELE(qi->qi_uquotaip); | 942 | IRELE(qi->qi_uquotaip); |
945 | qi->qi_uquotaip = NULL; /* paranoia */ | 943 | qi->qi_uquotaip = NULL; /* paranoia */ |
946 | } | 944 | } |
947 | if (qi->qi_gquotaip) { | 945 | if (qi->qi_gquotaip) { |
948 | IRELE(qi->qi_gquotaip); | 946 | IRELE(qi->qi_gquotaip); |
949 | qi->qi_gquotaip = NULL; | 947 | qi->qi_gquotaip = NULL; |
950 | } | 948 | } |
951 | mutex_destroy(&qi->qi_quotaofflock); | 949 | mutex_destroy(&qi->qi_quotaofflock); |
952 | kmem_free(qi); | 950 | kmem_free(qi); |
953 | mp->m_quotainfo = NULL; | 951 | mp->m_quotainfo = NULL; |
954 | } | 952 | } |
955 | 953 | ||
956 | 954 | ||
957 | 955 | ||
958 | /* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ | 956 | /* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ |
959 | 957 | ||
960 | /* ARGSUSED */ | 958 | /* ARGSUSED */ |
961 | STATIC void | 959 | STATIC void |
962 | xfs_qm_list_init( | 960 | xfs_qm_list_init( |
963 | xfs_dqlist_t *list, | 961 | xfs_dqlist_t *list, |
964 | char *str, | 962 | char *str, |
965 | int n) | 963 | int n) |
966 | { | 964 | { |
967 | mutex_init(&list->qh_lock); | 965 | mutex_init(&list->qh_lock); |
968 | INIT_LIST_HEAD(&list->qh_list); | 966 | INIT_LIST_HEAD(&list->qh_list); |
969 | list->qh_version = 0; | 967 | list->qh_version = 0; |
970 | list->qh_nelems = 0; | 968 | list->qh_nelems = 0; |
971 | } | 969 | } |
972 | 970 | ||
973 | STATIC void | 971 | STATIC void |
974 | xfs_qm_list_destroy( | 972 | xfs_qm_list_destroy( |
975 | xfs_dqlist_t *list) | 973 | xfs_dqlist_t *list) |
976 | { | 974 | { |
977 | mutex_destroy(&(list->qh_lock)); | 975 | mutex_destroy(&(list->qh_lock)); |
978 | } | 976 | } |
979 | 977 | ||
980 | /* | 978 | /* |
981 | * Create an inode and return with a reference already taken, but unlocked | 979 | * Create an inode and return with a reference already taken, but unlocked |
982 | * This is how we create quota inodes | 980 | * This is how we create quota inodes |
983 | */ | 981 | */ |
984 | STATIC int | 982 | STATIC int |
985 | xfs_qm_qino_alloc( | 983 | xfs_qm_qino_alloc( |
986 | xfs_mount_t *mp, | 984 | xfs_mount_t *mp, |
987 | xfs_inode_t **ip, | 985 | xfs_inode_t **ip, |
988 | __int64_t sbfields, | 986 | __int64_t sbfields, |
989 | uint flags) | 987 | uint flags) |
990 | { | 988 | { |
991 | xfs_trans_t *tp; | 989 | xfs_trans_t *tp; |
992 | int error; | 990 | int error; |
993 | int committed; | 991 | int committed; |
994 | 992 | ||
995 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); | 993 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); |
996 | if ((error = xfs_trans_reserve(tp, | 994 | if ((error = xfs_trans_reserve(tp, |
997 | XFS_QM_QINOCREATE_SPACE_RES(mp), | 995 | XFS_QM_QINOCREATE_SPACE_RES(mp), |
998 | XFS_CREATE_LOG_RES(mp), 0, | 996 | XFS_CREATE_LOG_RES(mp), 0, |
999 | XFS_TRANS_PERM_LOG_RES, | 997 | XFS_TRANS_PERM_LOG_RES, |
1000 | XFS_CREATE_LOG_COUNT))) { | 998 | XFS_CREATE_LOG_COUNT))) { |
1001 | xfs_trans_cancel(tp, 0); | 999 | xfs_trans_cancel(tp, 0); |
1002 | return error; | 1000 | return error; |
1003 | } | 1001 | } |
1004 | 1002 | ||
1005 | error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); | 1003 | error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); |
1006 | if (error) { | 1004 | if (error) { |
1007 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | | 1005 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | |
1008 | XFS_TRANS_ABORT); | 1006 | XFS_TRANS_ABORT); |
1009 | return error; | 1007 | return error; |
1010 | } | 1008 | } |
1011 | 1009 | ||
1012 | /* | 1010 | /* |
1013 | * Make the changes in the superblock, and log those too. | 1011 | * Make the changes in the superblock, and log those too. |
1014 | * sbfields arg may contain fields other than *QUOTINO; | 1012 | * sbfields arg may contain fields other than *QUOTINO; |
1015 | * VERSIONNUM for example. | 1013 | * VERSIONNUM for example. |
1016 | */ | 1014 | */ |
1017 | spin_lock(&mp->m_sb_lock); | 1015 | spin_lock(&mp->m_sb_lock); |
1018 | if (flags & XFS_QMOPT_SBVERSION) { | 1016 | if (flags & XFS_QMOPT_SBVERSION) { |
1019 | ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); | 1017 | ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); |
1020 | ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | | 1018 | ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | |
1021 | XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == | 1019 | XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == |
1022 | (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | | 1020 | (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | |
1023 | XFS_SB_GQUOTINO | XFS_SB_QFLAGS)); | 1021 | XFS_SB_GQUOTINO | XFS_SB_QFLAGS)); |
1024 | 1022 | ||
1025 | xfs_sb_version_addquota(&mp->m_sb); | 1023 | xfs_sb_version_addquota(&mp->m_sb); |
1026 | mp->m_sb.sb_uquotino = NULLFSINO; | 1024 | mp->m_sb.sb_uquotino = NULLFSINO; |
1027 | mp->m_sb.sb_gquotino = NULLFSINO; | 1025 | mp->m_sb.sb_gquotino = NULLFSINO; |
1028 | 1026 | ||
1029 | /* qflags will get updated _after_ quotacheck */ | 1027 | /* qflags will get updated _after_ quotacheck */ |
1030 | mp->m_sb.sb_qflags = 0; | 1028 | mp->m_sb.sb_qflags = 0; |
1031 | } | 1029 | } |
1032 | if (flags & XFS_QMOPT_UQUOTA) | 1030 | if (flags & XFS_QMOPT_UQUOTA) |
1033 | mp->m_sb.sb_uquotino = (*ip)->i_ino; | 1031 | mp->m_sb.sb_uquotino = (*ip)->i_ino; |
1034 | else | 1032 | else |
1035 | mp->m_sb.sb_gquotino = (*ip)->i_ino; | 1033 | mp->m_sb.sb_gquotino = (*ip)->i_ino; |
1036 | spin_unlock(&mp->m_sb_lock); | 1034 | spin_unlock(&mp->m_sb_lock); |
1037 | xfs_mod_sb(tp, sbfields); | 1035 | xfs_mod_sb(tp, sbfields); |
1038 | 1036 | ||
1039 | if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { | 1037 | if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { |
1040 | xfs_alert(mp, "%s failed (error %d)!", __func__, error); | 1038 | xfs_alert(mp, "%s failed (error %d)!", __func__, error); |
1041 | return error; | 1039 | return error; |
1042 | } | 1040 | } |
1043 | return 0; | 1041 | return 0; |
1044 | } | 1042 | } |
1045 | 1043 | ||
1046 | 1044 | ||
1047 | STATIC void | 1045 | STATIC void |
1048 | xfs_qm_reset_dqcounts( | 1046 | xfs_qm_reset_dqcounts( |
1049 | xfs_mount_t *mp, | 1047 | xfs_mount_t *mp, |
1050 | xfs_buf_t *bp, | 1048 | xfs_buf_t *bp, |
1051 | xfs_dqid_t id, | 1049 | xfs_dqid_t id, |
1052 | uint type) | 1050 | uint type) |
1053 | { | 1051 | { |
1054 | xfs_disk_dquot_t *ddq; | 1052 | xfs_disk_dquot_t *ddq; |
1055 | int j; | 1053 | int j; |
1056 | 1054 | ||
1057 | trace_xfs_reset_dqcounts(bp, _RET_IP_); | 1055 | trace_xfs_reset_dqcounts(bp, _RET_IP_); |
1058 | 1056 | ||
1059 | /* | 1057 | /* |
1060 | * Reset all counters and timers. They'll be | 1058 | * Reset all counters and timers. They'll be |
1061 | * started afresh by xfs_qm_quotacheck. | 1059 | * started afresh by xfs_qm_quotacheck. |
1062 | */ | 1060 | */ |
1063 | #ifdef DEBUG | 1061 | #ifdef DEBUG |
1064 | j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); | 1062 | j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); |
1065 | do_div(j, sizeof(xfs_dqblk_t)); | 1063 | do_div(j, sizeof(xfs_dqblk_t)); |
1066 | ASSERT(mp->m_quotainfo->qi_dqperchunk == j); | 1064 | ASSERT(mp->m_quotainfo->qi_dqperchunk == j); |
1067 | #endif | 1065 | #endif |
1068 | ddq = bp->b_addr; | 1066 | ddq = bp->b_addr; |
1069 | for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { | 1067 | for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { |
1070 | /* | 1068 | /* |
1071 | * Do a sanity check, and if needed, repair the dqblk. Don't | 1069 | * Do a sanity check, and if needed, repair the dqblk. Don't |
1072 | * output any warnings because it's perfectly possible to | 1070 | * output any warnings because it's perfectly possible to |
1073 | * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. | 1071 | * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. |
1074 | */ | 1072 | */ |
1075 | (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, | 1073 | (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, |
1076 | "xfs_quotacheck"); | 1074 | "xfs_quotacheck"); |
1077 | ddq->d_bcount = 0; | 1075 | ddq->d_bcount = 0; |
1078 | ddq->d_icount = 0; | 1076 | ddq->d_icount = 0; |
1079 | ddq->d_rtbcount = 0; | 1077 | ddq->d_rtbcount = 0; |
1080 | ddq->d_btimer = 0; | 1078 | ddq->d_btimer = 0; |
1081 | ddq->d_itimer = 0; | 1079 | ddq->d_itimer = 0; |
1082 | ddq->d_rtbtimer = 0; | 1080 | ddq->d_rtbtimer = 0; |
1083 | ddq->d_bwarns = 0; | 1081 | ddq->d_bwarns = 0; |
1084 | ddq->d_iwarns = 0; | 1082 | ddq->d_iwarns = 0; |
1085 | ddq->d_rtbwarns = 0; | 1083 | ddq->d_rtbwarns = 0; |
1086 | ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); | 1084 | ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); |
1087 | } | 1085 | } |
1088 | } | 1086 | } |
1089 | 1087 | ||
1090 | STATIC int | 1088 | STATIC int |
1091 | xfs_qm_dqiter_bufs( | 1089 | xfs_qm_dqiter_bufs( |
1092 | xfs_mount_t *mp, | 1090 | xfs_mount_t *mp, |
1093 | xfs_dqid_t firstid, | 1091 | xfs_dqid_t firstid, |
1094 | xfs_fsblock_t bno, | 1092 | xfs_fsblock_t bno, |
1095 | xfs_filblks_t blkcnt, | 1093 | xfs_filblks_t blkcnt, |
1096 | uint flags) | 1094 | uint flags) |
1097 | { | 1095 | { |
1098 | xfs_buf_t *bp; | 1096 | xfs_buf_t *bp; |
1099 | int error; | 1097 | int error; |
1100 | int type; | 1098 | int type; |
1101 | 1099 | ||
1102 | ASSERT(blkcnt > 0); | 1100 | ASSERT(blkcnt > 0); |
1103 | type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : | 1101 | type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : |
1104 | (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); | 1102 | (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); |
1105 | error = 0; | 1103 | error = 0; |
1106 | 1104 | ||
1107 | /* | 1105 | /* |
1108 | * Blkcnt arg can be a very big number, and might even be | 1106 | * Blkcnt arg can be a very big number, and might even be |
1109 | * larger than the log itself. So, we have to break it up into | 1107 | * larger than the log itself. So, we have to break it up into |
1110 | * manageable-sized transactions. | 1108 | * manageable-sized transactions. |
1111 | * Note that we don't start a permanent transaction here; we might | 1109 | * Note that we don't start a permanent transaction here; we might |
1112 | * not be able to get a log reservation for the whole thing up front, | 1110 | * not be able to get a log reservation for the whole thing up front, |
1113 | * and we don't really care to either, because we just discard | 1111 | * and we don't really care to either, because we just discard |
1114 | * everything if we were to crash in the middle of this loop. | 1112 | * everything if we were to crash in the middle of this loop. |
1115 | */ | 1113 | */ |
1116 | while (blkcnt--) { | 1114 | while (blkcnt--) { |
1117 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, | 1115 | error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, |
1118 | XFS_FSB_TO_DADDR(mp, bno), | 1116 | XFS_FSB_TO_DADDR(mp, bno), |
1119 | mp->m_quotainfo->qi_dqchunklen, 0, &bp); | 1117 | mp->m_quotainfo->qi_dqchunklen, 0, &bp); |
1120 | if (error) | 1118 | if (error) |
1121 | break; | 1119 | break; |
1122 | 1120 | ||
1123 | xfs_qm_reset_dqcounts(mp, bp, firstid, type); | 1121 | xfs_qm_reset_dqcounts(mp, bp, firstid, type); |
1124 | xfs_buf_delwri_queue(bp); | 1122 | xfs_buf_delwri_queue(bp); |
1125 | xfs_buf_relse(bp); | 1123 | xfs_buf_relse(bp); |
1126 | /* | 1124 | /* |
1127 | * goto the next block. | 1125 | * goto the next block. |
1128 | */ | 1126 | */ |
1129 | bno++; | 1127 | bno++; |
1130 | firstid += mp->m_quotainfo->qi_dqperchunk; | 1128 | firstid += mp->m_quotainfo->qi_dqperchunk; |
1131 | } | 1129 | } |
1132 | return error; | 1130 | return error; |
1133 | } | 1131 | } |
1134 | 1132 | ||
1135 | /* | 1133 | /* |
1136 | * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a | 1134 | * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a |
1137 | * caller supplied function for every chunk of dquots that we find. | 1135 | * caller supplied function for every chunk of dquots that we find. |
1138 | */ | 1136 | */ |
1139 | STATIC int | 1137 | STATIC int |
1140 | xfs_qm_dqiterate( | 1138 | xfs_qm_dqiterate( |
1141 | xfs_mount_t *mp, | 1139 | xfs_mount_t *mp, |
1142 | xfs_inode_t *qip, | 1140 | xfs_inode_t *qip, |
1143 | uint flags) | 1141 | uint flags) |
1144 | { | 1142 | { |
1145 | xfs_bmbt_irec_t *map; | 1143 | xfs_bmbt_irec_t *map; |
1146 | int i, nmaps; /* number of map entries */ | 1144 | int i, nmaps; /* number of map entries */ |
1147 | int error; /* return value */ | 1145 | int error; /* return value */ |
1148 | xfs_fileoff_t lblkno; | 1146 | xfs_fileoff_t lblkno; |
1149 | xfs_filblks_t maxlblkcnt; | 1147 | xfs_filblks_t maxlblkcnt; |
1150 | xfs_dqid_t firstid; | 1148 | xfs_dqid_t firstid; |
1151 | xfs_fsblock_t rablkno; | 1149 | xfs_fsblock_t rablkno; |
1152 | xfs_filblks_t rablkcnt; | 1150 | xfs_filblks_t rablkcnt; |
1153 | 1151 | ||
1154 | error = 0; | 1152 | error = 0; |
1155 | /* | 1153 | /* |
1156 | * This looks racy, but we can't keep an inode lock across a | 1154 | * This looks racy, but we can't keep an inode lock across a |
1157 | * trans_reserve. But, this gets called during quotacheck, and that | 1155 | * trans_reserve. But, this gets called during quotacheck, and that |
1158 | * happens only at mount time which is single threaded. | 1156 | * happens only at mount time which is single threaded. |
1159 | */ | 1157 | */ |
1160 | if (qip->i_d.di_nblocks == 0) | 1158 | if (qip->i_d.di_nblocks == 0) |
1161 | return 0; | 1159 | return 0; |
1162 | 1160 | ||
1163 | map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); | 1161 | map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); |
1164 | 1162 | ||
1165 | lblkno = 0; | 1163 | lblkno = 0; |
1166 | maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); | 1164 | maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); |
1167 | do { | 1165 | do { |
1168 | nmaps = XFS_DQITER_MAP_SIZE; | 1166 | nmaps = XFS_DQITER_MAP_SIZE; |
1169 | /* | 1167 | /* |
1170 | * We aren't changing the inode itself. Just changing | 1168 | * We aren't changing the inode itself. Just changing |
1171 | * some of its data. No new blocks are added here, and | 1169 | * some of its data. No new blocks are added here, and |
1172 | * the inode is never added to the transaction. | 1170 | * the inode is never added to the transaction. |
1173 | */ | 1171 | */ |
1174 | xfs_ilock(qip, XFS_ILOCK_SHARED); | 1172 | xfs_ilock(qip, XFS_ILOCK_SHARED); |
1175 | error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, | 1173 | error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, |
1176 | map, &nmaps, 0); | 1174 | map, &nmaps, 0); |
1177 | xfs_iunlock(qip, XFS_ILOCK_SHARED); | 1175 | xfs_iunlock(qip, XFS_ILOCK_SHARED); |
1178 | if (error) | 1176 | if (error) |
1179 | break; | 1177 | break; |
1180 | 1178 | ||
1181 | ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); | 1179 | ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); |
1182 | for (i = 0; i < nmaps; i++) { | 1180 | for (i = 0; i < nmaps; i++) { |
1183 | ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); | 1181 | ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); |
1184 | ASSERT(map[i].br_blockcount); | 1182 | ASSERT(map[i].br_blockcount); |
1185 | 1183 | ||
1186 | 1184 | ||
1187 | lblkno += map[i].br_blockcount; | 1185 | lblkno += map[i].br_blockcount; |
1188 | 1186 | ||
1189 | if (map[i].br_startblock == HOLESTARTBLOCK) | 1187 | if (map[i].br_startblock == HOLESTARTBLOCK) |
1190 | continue; | 1188 | continue; |
1191 | 1189 | ||
1192 | firstid = (xfs_dqid_t) map[i].br_startoff * | 1190 | firstid = (xfs_dqid_t) map[i].br_startoff * |
1193 | mp->m_quotainfo->qi_dqperchunk; | 1191 | mp->m_quotainfo->qi_dqperchunk; |
1194 | /* | 1192 | /* |
1195 | * Do a read-ahead on the next extent. | 1193 | * Do a read-ahead on the next extent. |
1196 | */ | 1194 | */ |
1197 | if ((i+1 < nmaps) && | 1195 | if ((i+1 < nmaps) && |
1198 | (map[i+1].br_startblock != HOLESTARTBLOCK)) { | 1196 | (map[i+1].br_startblock != HOLESTARTBLOCK)) { |
1199 | rablkcnt = map[i+1].br_blockcount; | 1197 | rablkcnt = map[i+1].br_blockcount; |
1200 | rablkno = map[i+1].br_startblock; | 1198 | rablkno = map[i+1].br_startblock; |
1201 | while (rablkcnt--) { | 1199 | while (rablkcnt--) { |
1202 | xfs_buf_readahead(mp->m_ddev_targp, | 1200 | xfs_buf_readahead(mp->m_ddev_targp, |
1203 | XFS_FSB_TO_DADDR(mp, rablkno), | 1201 | XFS_FSB_TO_DADDR(mp, rablkno), |
1204 | mp->m_quotainfo->qi_dqchunklen); | 1202 | mp->m_quotainfo->qi_dqchunklen); |
1205 | rablkno++; | 1203 | rablkno++; |
1206 | } | 1204 | } |
1207 | } | 1205 | } |
1208 | /* | 1206 | /* |
1209 | * Iterate thru all the blks in the extent and | 1207 | * Iterate thru all the blks in the extent and |
1210 | * reset the counters of all the dquots inside them. | 1208 | * reset the counters of all the dquots inside them. |
1211 | */ | 1209 | */ |
1212 | if ((error = xfs_qm_dqiter_bufs(mp, | 1210 | if ((error = xfs_qm_dqiter_bufs(mp, |
1213 | firstid, | 1211 | firstid, |
1214 | map[i].br_startblock, | 1212 | map[i].br_startblock, |
1215 | map[i].br_blockcount, | 1213 | map[i].br_blockcount, |
1216 | flags))) { | 1214 | flags))) { |
1217 | break; | 1215 | break; |
1218 | } | 1216 | } |
1219 | } | 1217 | } |
1220 | 1218 | ||
1221 | if (error) | 1219 | if (error) |
1222 | break; | 1220 | break; |
1223 | } while (nmaps > 0); | 1221 | } while (nmaps > 0); |
1224 | 1222 | ||
1225 | kmem_free(map); | 1223 | kmem_free(map); |
1226 | 1224 | ||
1227 | return error; | 1225 | return error; |
1228 | } | 1226 | } |
1229 | 1227 | ||
1230 | /* | 1228 | /* |
1231 | * Called by dqusage_adjust in doing a quotacheck. | 1229 | * Called by dqusage_adjust in doing a quotacheck. |
1232 | * | 1230 | * |
1233 | * Given the inode, and a dquot id this updates both the incore dqout as well | 1231 | * Given the inode, and a dquot id this updates both the incore dqout as well |
1234 | * as the buffer copy. This is so that once the quotacheck is done, we can | 1232 | * as the buffer copy. This is so that once the quotacheck is done, we can |
1235 | * just log all the buffers, as opposed to logging numerous updates to | 1233 | * just log all the buffers, as opposed to logging numerous updates to |
1236 | * individual dquots. | 1234 | * individual dquots. |
1237 | */ | 1235 | */ |
1238 | STATIC int | 1236 | STATIC int |
1239 | xfs_qm_quotacheck_dqadjust( | 1237 | xfs_qm_quotacheck_dqadjust( |
1240 | struct xfs_inode *ip, | 1238 | struct xfs_inode *ip, |
1241 | xfs_dqid_t id, | 1239 | xfs_dqid_t id, |
1242 | uint type, | 1240 | uint type, |
1243 | xfs_qcnt_t nblks, | 1241 | xfs_qcnt_t nblks, |
1244 | xfs_qcnt_t rtblks) | 1242 | xfs_qcnt_t rtblks) |
1245 | { | 1243 | { |
1246 | struct xfs_mount *mp = ip->i_mount; | 1244 | struct xfs_mount *mp = ip->i_mount; |
1247 | struct xfs_dquot *dqp; | 1245 | struct xfs_dquot *dqp; |
1248 | int error; | 1246 | int error; |
1249 | 1247 | ||
1250 | error = xfs_qm_dqget(mp, ip, id, type, | 1248 | error = xfs_qm_dqget(mp, ip, id, type, |
1251 | XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp); | 1249 | XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp); |
1252 | if (error) { | 1250 | if (error) { |
1253 | /* | 1251 | /* |
1254 | * Shouldn't be able to turn off quotas here. | 1252 | * Shouldn't be able to turn off quotas here. |
1255 | */ | 1253 | */ |
1256 | ASSERT(error != ESRCH); | 1254 | ASSERT(error != ESRCH); |
1257 | ASSERT(error != ENOENT); | 1255 | ASSERT(error != ENOENT); |
1258 | return error; | 1256 | return error; |
1259 | } | 1257 | } |
1260 | 1258 | ||
1261 | trace_xfs_dqadjust(dqp); | 1259 | trace_xfs_dqadjust(dqp); |
1262 | 1260 | ||
1263 | /* | 1261 | /* |
1264 | * Adjust the inode count and the block count to reflect this inode's | 1262 | * Adjust the inode count and the block count to reflect this inode's |
1265 | * resource usage. | 1263 | * resource usage. |
1266 | */ | 1264 | */ |
1267 | be64_add_cpu(&dqp->q_core.d_icount, 1); | 1265 | be64_add_cpu(&dqp->q_core.d_icount, 1); |
1268 | dqp->q_res_icount++; | 1266 | dqp->q_res_icount++; |
1269 | if (nblks) { | 1267 | if (nblks) { |
1270 | be64_add_cpu(&dqp->q_core.d_bcount, nblks); | 1268 | be64_add_cpu(&dqp->q_core.d_bcount, nblks); |
1271 | dqp->q_res_bcount += nblks; | 1269 | dqp->q_res_bcount += nblks; |
1272 | } | 1270 | } |
1273 | if (rtblks) { | 1271 | if (rtblks) { |
1274 | be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); | 1272 | be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); |
1275 | dqp->q_res_rtbcount += rtblks; | 1273 | dqp->q_res_rtbcount += rtblks; |
1276 | } | 1274 | } |
1277 | 1275 | ||
1278 | /* | 1276 | /* |
1279 | * Set default limits, adjust timers (since we changed usages) | 1277 | * Set default limits, adjust timers (since we changed usages) |
1280 | * | 1278 | * |
1281 | * There are no timers for the default values set in the root dquot. | 1279 | * There are no timers for the default values set in the root dquot. |
1282 | */ | 1280 | */ |
1283 | if (dqp->q_core.d_id) { | 1281 | if (dqp->q_core.d_id) { |
1284 | xfs_qm_adjust_dqlimits(mp, &dqp->q_core); | 1282 | xfs_qm_adjust_dqlimits(mp, &dqp->q_core); |
1285 | xfs_qm_adjust_dqtimers(mp, &dqp->q_core); | 1283 | xfs_qm_adjust_dqtimers(mp, &dqp->q_core); |
1286 | } | 1284 | } |
1287 | 1285 | ||
1288 | dqp->dq_flags |= XFS_DQ_DIRTY; | 1286 | dqp->dq_flags |= XFS_DQ_DIRTY; |
1289 | xfs_qm_dqput(dqp); | 1287 | xfs_qm_dqput(dqp); |
1290 | return 0; | 1288 | return 0; |
1291 | } | 1289 | } |
1292 | 1290 | ||
1293 | STATIC int | 1291 | STATIC int |
1294 | xfs_qm_get_rtblks( | 1292 | xfs_qm_get_rtblks( |
1295 | xfs_inode_t *ip, | 1293 | xfs_inode_t *ip, |
1296 | xfs_qcnt_t *O_rtblks) | 1294 | xfs_qcnt_t *O_rtblks) |
1297 | { | 1295 | { |
1298 | xfs_filblks_t rtblks; /* total rt blks */ | 1296 | xfs_filblks_t rtblks; /* total rt blks */ |
1299 | xfs_extnum_t idx; /* extent record index */ | 1297 | xfs_extnum_t idx; /* extent record index */ |
1300 | xfs_ifork_t *ifp; /* inode fork pointer */ | 1298 | xfs_ifork_t *ifp; /* inode fork pointer */ |
1301 | xfs_extnum_t nextents; /* number of extent entries */ | 1299 | xfs_extnum_t nextents; /* number of extent entries */ |
1302 | int error; | 1300 | int error; |
1303 | 1301 | ||
1304 | ASSERT(XFS_IS_REALTIME_INODE(ip)); | 1302 | ASSERT(XFS_IS_REALTIME_INODE(ip)); |
1305 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 1303 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
1306 | if (!(ifp->if_flags & XFS_IFEXTENTS)) { | 1304 | if (!(ifp->if_flags & XFS_IFEXTENTS)) { |
1307 | if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) | 1305 | if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) |
1308 | return error; | 1306 | return error; |
1309 | } | 1307 | } |
1310 | rtblks = 0; | 1308 | rtblks = 0; |
1311 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 1309 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
1312 | for (idx = 0; idx < nextents; idx++) | 1310 | for (idx = 0; idx < nextents; idx++) |
1313 | rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx)); | 1311 | rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx)); |
1314 | *O_rtblks = (xfs_qcnt_t)rtblks; | 1312 | *O_rtblks = (xfs_qcnt_t)rtblks; |
1315 | return 0; | 1313 | return 0; |
1316 | } | 1314 | } |
1317 | 1315 | ||
1318 | /* | 1316 | /* |
1319 | * callback routine supplied to bulkstat(). Given an inumber, find its | 1317 | * callback routine supplied to bulkstat(). Given an inumber, find its |
1320 | * dquots and update them to account for resources taken by that inode. | 1318 | * dquots and update them to account for resources taken by that inode. |
1321 | */ | 1319 | */ |
1322 | /* ARGSUSED */ | 1320 | /* ARGSUSED */ |
1323 | STATIC int | 1321 | STATIC int |
1324 | xfs_qm_dqusage_adjust( | 1322 | xfs_qm_dqusage_adjust( |
1325 | xfs_mount_t *mp, /* mount point for filesystem */ | 1323 | xfs_mount_t *mp, /* mount point for filesystem */ |
1326 | xfs_ino_t ino, /* inode number to get data for */ | 1324 | xfs_ino_t ino, /* inode number to get data for */ |
1327 | void __user *buffer, /* not used */ | 1325 | void __user *buffer, /* not used */ |
1328 | int ubsize, /* not used */ | 1326 | int ubsize, /* not used */ |
1329 | int *ubused, /* not used */ | 1327 | int *ubused, /* not used */ |
1330 | int *res) /* result code value */ | 1328 | int *res) /* result code value */ |
1331 | { | 1329 | { |
1332 | xfs_inode_t *ip; | 1330 | xfs_inode_t *ip; |
1333 | xfs_qcnt_t nblks, rtblks = 0; | 1331 | xfs_qcnt_t nblks, rtblks = 0; |
1334 | int error; | 1332 | int error; |
1335 | 1333 | ||
1336 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 1334 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
1337 | 1335 | ||
1338 | /* | 1336 | /* |
1339 | * rootino must have its resources accounted for, not so with the quota | 1337 | * rootino must have its resources accounted for, not so with the quota |
1340 | * inodes. | 1338 | * inodes. |
1341 | */ | 1339 | */ |
1342 | if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { | 1340 | if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { |
1343 | *res = BULKSTAT_RV_NOTHING; | 1341 | *res = BULKSTAT_RV_NOTHING; |
1344 | return XFS_ERROR(EINVAL); | 1342 | return XFS_ERROR(EINVAL); |
1345 | } | 1343 | } |
1346 | 1344 | ||
1347 | /* | 1345 | /* |
1348 | * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget | 1346 | * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget |
1349 | * interface expects the inode to be exclusively locked because that's | 1347 | * interface expects the inode to be exclusively locked because that's |
1350 | * the case in all other instances. It's OK that we do this because | 1348 | * the case in all other instances. It's OK that we do this because |
1351 | * quotacheck is done only at mount time. | 1349 | * quotacheck is done only at mount time. |
1352 | */ | 1350 | */ |
1353 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); | 1351 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); |
1354 | if (error) { | 1352 | if (error) { |
1355 | *res = BULKSTAT_RV_NOTHING; | 1353 | *res = BULKSTAT_RV_NOTHING; |
1356 | return error; | 1354 | return error; |
1357 | } | 1355 | } |
1358 | 1356 | ||
1359 | ASSERT(ip->i_delayed_blks == 0); | 1357 | ASSERT(ip->i_delayed_blks == 0); |
1360 | 1358 | ||
1361 | if (XFS_IS_REALTIME_INODE(ip)) { | 1359 | if (XFS_IS_REALTIME_INODE(ip)) { |
1362 | /* | 1360 | /* |
1363 | * Walk thru the extent list and count the realtime blocks. | 1361 | * Walk thru the extent list and count the realtime blocks. |
1364 | */ | 1362 | */ |
1365 | error = xfs_qm_get_rtblks(ip, &rtblks); | 1363 | error = xfs_qm_get_rtblks(ip, &rtblks); |
1366 | if (error) | 1364 | if (error) |
1367 | goto error0; | 1365 | goto error0; |
1368 | } | 1366 | } |
1369 | 1367 | ||
1370 | nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; | 1368 | nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; |
1371 | 1369 | ||
1372 | /* | 1370 | /* |
1373 | * Add the (disk blocks and inode) resources occupied by this | 1371 | * Add the (disk blocks and inode) resources occupied by this |
1374 | * inode to its dquots. We do this adjustment in the incore dquot, | 1372 | * inode to its dquots. We do this adjustment in the incore dquot, |
1375 | * and also copy the changes to its buffer. | 1373 | * and also copy the changes to its buffer. |
1376 | * We don't care about putting these changes in a transaction | 1374 | * We don't care about putting these changes in a transaction |
1377 | * envelope because if we crash in the middle of a 'quotacheck' | 1375 | * envelope because if we crash in the middle of a 'quotacheck' |
1378 | * we have to start from the beginning anyway. | 1376 | * we have to start from the beginning anyway. |
1379 | * Once we're done, we'll log all the dquot bufs. | 1377 | * Once we're done, we'll log all the dquot bufs. |
1380 | * | 1378 | * |
1381 | * The *QUOTA_ON checks below may look pretty racy, but quotachecks | 1379 | * The *QUOTA_ON checks below may look pretty racy, but quotachecks |
1382 | * and quotaoffs don't race. (Quotachecks happen at mount time only). | 1380 | * and quotaoffs don't race. (Quotachecks happen at mount time only). |
1383 | */ | 1381 | */ |
1384 | if (XFS_IS_UQUOTA_ON(mp)) { | 1382 | if (XFS_IS_UQUOTA_ON(mp)) { |
1385 | error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, | 1383 | error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, |
1386 | XFS_DQ_USER, nblks, rtblks); | 1384 | XFS_DQ_USER, nblks, rtblks); |
1387 | if (error) | 1385 | if (error) |
1388 | goto error0; | 1386 | goto error0; |
1389 | } | 1387 | } |
1390 | 1388 | ||
1391 | if (XFS_IS_GQUOTA_ON(mp)) { | 1389 | if (XFS_IS_GQUOTA_ON(mp)) { |
1392 | error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, | 1390 | error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, |
1393 | XFS_DQ_GROUP, nblks, rtblks); | 1391 | XFS_DQ_GROUP, nblks, rtblks); |
1394 | if (error) | 1392 | if (error) |
1395 | goto error0; | 1393 | goto error0; |
1396 | } | 1394 | } |
1397 | 1395 | ||
1398 | if (XFS_IS_PQUOTA_ON(mp)) { | 1396 | if (XFS_IS_PQUOTA_ON(mp)) { |
1399 | error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), | 1397 | error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), |
1400 | XFS_DQ_PROJ, nblks, rtblks); | 1398 | XFS_DQ_PROJ, nblks, rtblks); |
1401 | if (error) | 1399 | if (error) |
1402 | goto error0; | 1400 | goto error0; |
1403 | } | 1401 | } |
1404 | 1402 | ||
1405 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 1403 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1406 | IRELE(ip); | 1404 | IRELE(ip); |
1407 | *res = BULKSTAT_RV_DIDONE; | 1405 | *res = BULKSTAT_RV_DIDONE; |
1408 | return 0; | 1406 | return 0; |
1409 | 1407 | ||
1410 | error0: | 1408 | error0: |
1411 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 1409 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1412 | IRELE(ip); | 1410 | IRELE(ip); |
1413 | *res = BULKSTAT_RV_GIVEUP; | 1411 | *res = BULKSTAT_RV_GIVEUP; |
1414 | return error; | 1412 | return error; |
1415 | } | 1413 | } |
1416 | 1414 | ||
1417 | /* | 1415 | /* |
1418 | * Walk thru all the filesystem inodes and construct a consistent view | 1416 | * Walk thru all the filesystem inodes and construct a consistent view |
1419 | * of the disk quota world. If the quotacheck fails, disable quotas. | 1417 | * of the disk quota world. If the quotacheck fails, disable quotas. |
1420 | */ | 1418 | */ |
1421 | int | 1419 | int |
1422 | xfs_qm_quotacheck( | 1420 | xfs_qm_quotacheck( |
1423 | xfs_mount_t *mp) | 1421 | xfs_mount_t *mp) |
1424 | { | 1422 | { |
1425 | int done, count, error; | 1423 | int done, count, error; |
1426 | xfs_ino_t lastino; | 1424 | xfs_ino_t lastino; |
1427 | size_t structsz; | 1425 | size_t structsz; |
1428 | xfs_inode_t *uip, *gip; | 1426 | xfs_inode_t *uip, *gip; |
1429 | uint flags; | 1427 | uint flags; |
1430 | 1428 | ||
1431 | count = INT_MAX; | 1429 | count = INT_MAX; |
1432 | structsz = 1; | 1430 | structsz = 1; |
1433 | lastino = 0; | 1431 | lastino = 0; |
1434 | flags = 0; | 1432 | flags = 0; |
1435 | 1433 | ||
1436 | ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); | 1434 | ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); |
1437 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 1435 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
1438 | 1436 | ||
1439 | /* | 1437 | /* |
1440 | * There should be no cached dquots. The (simplistic) quotacheck | 1438 | * There should be no cached dquots. The (simplistic) quotacheck |
1441 | * algorithm doesn't like that. | 1439 | * algorithm doesn't like that. |
1442 | */ | 1440 | */ |
1443 | ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); | 1441 | ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); |
1444 | 1442 | ||
1445 | xfs_notice(mp, "Quotacheck needed: Please wait."); | 1443 | xfs_notice(mp, "Quotacheck needed: Please wait."); |
1446 | 1444 | ||
1447 | /* | 1445 | /* |
1448 | * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset | 1446 | * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset |
1449 | * their counters to zero. We need a clean slate. | 1447 | * their counters to zero. We need a clean slate. |
1450 | * We don't log our changes till later. | 1448 | * We don't log our changes till later. |
1451 | */ | 1449 | */ |
1452 | uip = mp->m_quotainfo->qi_uquotaip; | 1450 | uip = mp->m_quotainfo->qi_uquotaip; |
1453 | if (uip) { | 1451 | if (uip) { |
1454 | error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA); | 1452 | error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA); |
1455 | if (error) | 1453 | if (error) |
1456 | goto error_return; | 1454 | goto error_return; |
1457 | flags |= XFS_UQUOTA_CHKD; | 1455 | flags |= XFS_UQUOTA_CHKD; |
1458 | } | 1456 | } |
1459 | 1457 | ||
1460 | gip = mp->m_quotainfo->qi_gquotaip; | 1458 | gip = mp->m_quotainfo->qi_gquotaip; |
1461 | if (gip) { | 1459 | if (gip) { |
1462 | error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? | 1460 | error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? |
1463 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); | 1461 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); |
1464 | if (error) | 1462 | if (error) |
1465 | goto error_return; | 1463 | goto error_return; |
1466 | flags |= XFS_OQUOTA_CHKD; | 1464 | flags |= XFS_OQUOTA_CHKD; |
1467 | } | 1465 | } |
1468 | 1466 | ||
1469 | do { | 1467 | do { |
1470 | /* | 1468 | /* |
1471 | * Iterate thru all the inodes in the file system, | 1469 | * Iterate thru all the inodes in the file system, |
1472 | * adjusting the corresponding dquot counters in core. | 1470 | * adjusting the corresponding dquot counters in core. |
1473 | */ | 1471 | */ |
1474 | error = xfs_bulkstat(mp, &lastino, &count, | 1472 | error = xfs_bulkstat(mp, &lastino, &count, |
1475 | xfs_qm_dqusage_adjust, | 1473 | xfs_qm_dqusage_adjust, |
1476 | structsz, NULL, &done); | 1474 | structsz, NULL, &done); |
1477 | if (error) | 1475 | if (error) |
1478 | break; | 1476 | break; |
1479 | 1477 | ||
1480 | } while (!done); | 1478 | } while (!done); |
1481 | 1479 | ||
1482 | /* | 1480 | /* |
1483 | * We've made all the changes that we need to make incore. | 1481 | * We've made all the changes that we need to make incore. |
1484 | * Flush them down to disk buffers if everything was updated | 1482 | * Flush them down to disk buffers if everything was updated |
1485 | * successfully. | 1483 | * successfully. |
1486 | */ | 1484 | */ |
1487 | if (!error) | 1485 | if (!error) |
1488 | error = xfs_qm_dqflush_all(mp); | 1486 | error = xfs_qm_dqflush_all(mp); |
1489 | 1487 | ||
1490 | /* | 1488 | /* |
1491 | * We can get this error if we couldn't do a dquot allocation inside | 1489 | * We can get this error if we couldn't do a dquot allocation inside |
1492 | * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the | 1490 | * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the |
1493 | * dirty dquots that might be cached, we just want to get rid of them | 1491 | * dirty dquots that might be cached, we just want to get rid of them |
1494 | * and turn quotaoff. The dquots won't be attached to any of the inodes | 1492 | * and turn quotaoff. The dquots won't be attached to any of the inodes |
1495 | * at this point (because we intentionally didn't in dqget_noattach). | 1493 | * at this point (because we intentionally didn't in dqget_noattach). |
1496 | */ | 1494 | */ |
1497 | if (error) { | 1495 | if (error) { |
1498 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); | 1496 | xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); |
1499 | goto error_return; | 1497 | goto error_return; |
1500 | } | 1498 | } |
1501 | 1499 | ||
1502 | /* | 1500 | /* |
1503 | * We didn't log anything, because if we crashed, we'll have to | 1501 | * We didn't log anything, because if we crashed, we'll have to |
1504 | * start the quotacheck from scratch anyway. However, we must make | 1502 | * start the quotacheck from scratch anyway. However, we must make |
1505 | * sure that our dquot changes are secure before we put the | 1503 | * sure that our dquot changes are secure before we put the |
1506 | * quotacheck'd stamp on the superblock. So, here we do a synchronous | 1504 | * quotacheck'd stamp on the superblock. So, here we do a synchronous |
1507 | * flush. | 1505 | * flush. |
1508 | */ | 1506 | */ |
1509 | xfs_flush_buftarg(mp->m_ddev_targp, 1); | 1507 | xfs_flush_buftarg(mp->m_ddev_targp, 1); |
1510 | 1508 | ||
1511 | /* | 1509 | /* |
1512 | * If one type of quotas is off, then it will lose its | 1510 | * If one type of quotas is off, then it will lose its |
1513 | * quotachecked status, since we won't be doing accounting for | 1511 | * quotachecked status, since we won't be doing accounting for |
1514 | * that type anymore. | 1512 | * that type anymore. |
1515 | */ | 1513 | */ |
1516 | mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); | 1514 | mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); |
1517 | mp->m_qflags |= flags; | 1515 | mp->m_qflags |= flags; |
1518 | 1516 | ||
1519 | error_return: | 1517 | error_return: |
1520 | if (error) { | 1518 | if (error) { |
1521 | xfs_warn(mp, | 1519 | xfs_warn(mp, |
1522 | "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", | 1520 | "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", |
1523 | error); | 1521 | error); |
1524 | /* | 1522 | /* |
1525 | * We must turn off quotas. | 1523 | * We must turn off quotas. |
1526 | */ | 1524 | */ |
1527 | ASSERT(mp->m_quotainfo != NULL); | 1525 | ASSERT(mp->m_quotainfo != NULL); |
1528 | ASSERT(xfs_Gqm != NULL); | 1526 | ASSERT(xfs_Gqm != NULL); |
1529 | xfs_qm_destroy_quotainfo(mp); | 1527 | xfs_qm_destroy_quotainfo(mp); |
1530 | if (xfs_mount_reset_sbqflags(mp)) { | 1528 | if (xfs_mount_reset_sbqflags(mp)) { |
1531 | xfs_warn(mp, | 1529 | xfs_warn(mp, |
1532 | "Quotacheck: Failed to reset quota flags."); | 1530 | "Quotacheck: Failed to reset quota flags."); |
1533 | } | 1531 | } |
1534 | } else | 1532 | } else |
1535 | xfs_notice(mp, "Quotacheck: Done."); | 1533 | xfs_notice(mp, "Quotacheck: Done."); |
1536 | return (error); | 1534 | return (error); |
1537 | } | 1535 | } |
1538 | 1536 | ||
1539 | /* | 1537 | /* |
1540 | * This is called after the superblock has been read in and we're ready to | 1538 | * This is called after the superblock has been read in and we're ready to |
1541 | * iget the quota inodes. | 1539 | * iget the quota inodes. |
1542 | */ | 1540 | */ |
1543 | STATIC int | 1541 | STATIC int |
1544 | xfs_qm_init_quotainos( | 1542 | xfs_qm_init_quotainos( |
1545 | xfs_mount_t *mp) | 1543 | xfs_mount_t *mp) |
1546 | { | 1544 | { |
1547 | xfs_inode_t *uip, *gip; | 1545 | xfs_inode_t *uip, *gip; |
1548 | int error; | 1546 | int error; |
1549 | __int64_t sbflags; | 1547 | __int64_t sbflags; |
1550 | uint flags; | 1548 | uint flags; |
1551 | 1549 | ||
1552 | ASSERT(mp->m_quotainfo); | 1550 | ASSERT(mp->m_quotainfo); |
1553 | uip = gip = NULL; | 1551 | uip = gip = NULL; |
1554 | sbflags = 0; | 1552 | sbflags = 0; |
1555 | flags = 0; | 1553 | flags = 0; |
1556 | 1554 | ||
1557 | /* | 1555 | /* |
1558 | * Get the uquota and gquota inodes | 1556 | * Get the uquota and gquota inodes |
1559 | */ | 1557 | */ |
1560 | if (xfs_sb_version_hasquota(&mp->m_sb)) { | 1558 | if (xfs_sb_version_hasquota(&mp->m_sb)) { |
1561 | if (XFS_IS_UQUOTA_ON(mp) && | 1559 | if (XFS_IS_UQUOTA_ON(mp) && |
1562 | mp->m_sb.sb_uquotino != NULLFSINO) { | 1560 | mp->m_sb.sb_uquotino != NULLFSINO) { |
1563 | ASSERT(mp->m_sb.sb_uquotino > 0); | 1561 | ASSERT(mp->m_sb.sb_uquotino > 0); |
1564 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 1562 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
1565 | 0, 0, &uip))) | 1563 | 0, 0, &uip))) |
1566 | return XFS_ERROR(error); | 1564 | return XFS_ERROR(error); |
1567 | } | 1565 | } |
1568 | if (XFS_IS_OQUOTA_ON(mp) && | 1566 | if (XFS_IS_OQUOTA_ON(mp) && |
1569 | mp->m_sb.sb_gquotino != NULLFSINO) { | 1567 | mp->m_sb.sb_gquotino != NULLFSINO) { |
1570 | ASSERT(mp->m_sb.sb_gquotino > 0); | 1568 | ASSERT(mp->m_sb.sb_gquotino > 0); |
1571 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 1569 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
1572 | 0, 0, &gip))) { | 1570 | 0, 0, &gip))) { |
1573 | if (uip) | 1571 | if (uip) |
1574 | IRELE(uip); | 1572 | IRELE(uip); |
1575 | return XFS_ERROR(error); | 1573 | return XFS_ERROR(error); |
1576 | } | 1574 | } |
1577 | } | 1575 | } |
1578 | } else { | 1576 | } else { |
1579 | flags |= XFS_QMOPT_SBVERSION; | 1577 | flags |= XFS_QMOPT_SBVERSION; |
1580 | sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | | 1578 | sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | |
1581 | XFS_SB_GQUOTINO | XFS_SB_QFLAGS); | 1579 | XFS_SB_GQUOTINO | XFS_SB_QFLAGS); |
1582 | } | 1580 | } |
1583 | 1581 | ||
1584 | /* | 1582 | /* |
1585 | * Create the two inodes, if they don't exist already. The changes | 1583 | * Create the two inodes, if they don't exist already. The changes |
1586 | * made above will get added to a transaction and logged in one of | 1584 | * made above will get added to a transaction and logged in one of |
1587 | * the qino_alloc calls below. If the device is readonly, | 1585 | * the qino_alloc calls below. If the device is readonly, |
1588 | * temporarily switch to read-write to do this. | 1586 | * temporarily switch to read-write to do this. |
1589 | */ | 1587 | */ |
1590 | if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { | 1588 | if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { |
1591 | if ((error = xfs_qm_qino_alloc(mp, &uip, | 1589 | if ((error = xfs_qm_qino_alloc(mp, &uip, |
1592 | sbflags | XFS_SB_UQUOTINO, | 1590 | sbflags | XFS_SB_UQUOTINO, |
1593 | flags | XFS_QMOPT_UQUOTA))) | 1591 | flags | XFS_QMOPT_UQUOTA))) |
1594 | return XFS_ERROR(error); | 1592 | return XFS_ERROR(error); |
1595 | 1593 | ||
1596 | flags &= ~XFS_QMOPT_SBVERSION; | 1594 | flags &= ~XFS_QMOPT_SBVERSION; |
1597 | } | 1595 | } |
1598 | if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) { | 1596 | if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) { |
1599 | flags |= (XFS_IS_GQUOTA_ON(mp) ? | 1597 | flags |= (XFS_IS_GQUOTA_ON(mp) ? |
1600 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); | 1598 | XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); |
1601 | error = xfs_qm_qino_alloc(mp, &gip, | 1599 | error = xfs_qm_qino_alloc(mp, &gip, |
1602 | sbflags | XFS_SB_GQUOTINO, flags); | 1600 | sbflags | XFS_SB_GQUOTINO, flags); |
1603 | if (error) { | 1601 | if (error) { |
1604 | if (uip) | 1602 | if (uip) |
1605 | IRELE(uip); | 1603 | IRELE(uip); |
1606 | 1604 | ||
1607 | return XFS_ERROR(error); | 1605 | return XFS_ERROR(error); |
1608 | } | 1606 | } |
1609 | } | 1607 | } |
1610 | 1608 | ||
1611 | mp->m_quotainfo->qi_uquotaip = uip; | 1609 | mp->m_quotainfo->qi_uquotaip = uip; |
1612 | mp->m_quotainfo->qi_gquotaip = gip; | 1610 | mp->m_quotainfo->qi_gquotaip = gip; |
1613 | 1611 | ||
1614 | return 0; | 1612 | return 0; |
1615 | } | 1613 | } |
1616 | 1614 | ||
1617 | 1615 | ||
1618 | 1616 | ||
1619 | /* | 1617 | /* |
1620 | * Pop the least recently used dquot off the freelist and recycle it. | 1618 | * Pop the least recently used dquot off the freelist and recycle it. |
1621 | */ | 1619 | */ |
1622 | STATIC struct xfs_dquot * | 1620 | STATIC struct xfs_dquot * |
1623 | xfs_qm_dqreclaim_one(void) | 1621 | xfs_qm_dqreclaim_one(void) |
1624 | { | 1622 | { |
1625 | struct xfs_dquot *dqp; | 1623 | struct xfs_dquot *dqp; |
1626 | int restarts = 0; | 1624 | int restarts = 0; |
1627 | 1625 | ||
1628 | mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); | 1626 | mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); |
1629 | restart: | 1627 | restart: |
1630 | list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { | 1628 | list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { |
1631 | struct xfs_mount *mp = dqp->q_mount; | 1629 | struct xfs_mount *mp = dqp->q_mount; |
1632 | 1630 | ||
1633 | if (!xfs_dqlock_nowait(dqp)) | 1631 | if (!xfs_dqlock_nowait(dqp)) |
1634 | continue; | 1632 | continue; |
1635 | 1633 | ||
1636 | /* | 1634 | /* |
1637 | * This dquot has already been grabbed by dqlookup. | 1635 | * This dquot has already been grabbed by dqlookup. |
1638 | * Remove it from the freelist and try again. | 1636 | * Remove it from the freelist and try again. |
1639 | */ | 1637 | */ |
1640 | if (dqp->q_nrefs) { | 1638 | if (dqp->q_nrefs) { |
1641 | trace_xfs_dqreclaim_want(dqp); | 1639 | trace_xfs_dqreclaim_want(dqp); |
1642 | XQM_STATS_INC(xqmstats.xs_qm_dqwants); | 1640 | XQM_STATS_INC(xqmstats.xs_qm_dqwants); |
1643 | 1641 | ||
1644 | list_del_init(&dqp->q_freelist); | 1642 | list_del_init(&dqp->q_freelist); |
1645 | xfs_Gqm->qm_dqfrlist_cnt--; | 1643 | xfs_Gqm->qm_dqfrlist_cnt--; |
1646 | restarts++; | 1644 | restarts++; |
1647 | goto dqunlock; | 1645 | goto dqunlock; |
1648 | } | 1646 | } |
1649 | 1647 | ||
1650 | ASSERT(dqp->q_hash); | 1648 | ASSERT(dqp->q_hash); |
1651 | ASSERT(!list_empty(&dqp->q_mplist)); | 1649 | ASSERT(!list_empty(&dqp->q_mplist)); |
1652 | 1650 | ||
1653 | /* | 1651 | /* |
1654 | * Try to grab the flush lock. If this dquot is in the process | 1652 | * Try to grab the flush lock. If this dquot is in the process |
1655 | * of getting flushed to disk, we don't want to reclaim it. | 1653 | * of getting flushed to disk, we don't want to reclaim it. |
1656 | */ | 1654 | */ |
1657 | if (!xfs_dqflock_nowait(dqp)) | 1655 | if (!xfs_dqflock_nowait(dqp)) |
1658 | goto dqunlock; | 1656 | goto dqunlock; |
1659 | 1657 | ||
1660 | /* | 1658 | /* |
1661 | * We have the flush lock so we know that this is not in the | 1659 | * We have the flush lock so we know that this is not in the |
1662 | * process of being flushed. So, if this is dirty, flush it | 1660 | * process of being flushed. So, if this is dirty, flush it |
1663 | * DELWRI so that we don't get a freelist infested with | 1661 | * DELWRI so that we don't get a freelist infested with |
1664 | * dirty dquots. | 1662 | * dirty dquots. |
1665 | */ | 1663 | */ |
1666 | if (XFS_DQ_IS_DIRTY(dqp)) { | 1664 | if (XFS_DQ_IS_DIRTY(dqp)) { |
1667 | int error; | 1665 | int error; |
1668 | 1666 | ||
1669 | trace_xfs_dqreclaim_dirty(dqp); | 1667 | trace_xfs_dqreclaim_dirty(dqp); |
1670 | 1668 | ||
1671 | /* | 1669 | /* |
1672 | * We flush it delayed write, so don't bother | 1670 | * We flush it delayed write, so don't bother |
1673 | * releasing the freelist lock. | 1671 | * releasing the freelist lock. |
1674 | */ | 1672 | */ |
1675 | error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK); | 1673 | error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK); |
1676 | if (error) { | 1674 | if (error) { |
1677 | xfs_warn(mp, "%s: dquot %p flush failed", | 1675 | xfs_warn(mp, "%s: dquot %p flush failed", |
1678 | __func__, dqp); | 1676 | __func__, dqp); |
1679 | } | 1677 | } |
1680 | goto dqunlock; | 1678 | goto dqunlock; |
1681 | } | 1679 | } |
1682 | xfs_dqfunlock(dqp); | 1680 | xfs_dqfunlock(dqp); |
1683 | 1681 | ||
1684 | /* | 1682 | /* |
1685 | * Prevent lookup now that we are going to reclaim the dquot. | 1683 | * Prevent lookup now that we are going to reclaim the dquot. |
1686 | * Once XFS_DQ_FREEING is set lookup won't touch the dquot, | 1684 | * Once XFS_DQ_FREEING is set lookup won't touch the dquot, |
1687 | * thus we can drop the lock now. | 1685 | * thus we can drop the lock now. |
1688 | */ | 1686 | */ |
1689 | dqp->dq_flags |= XFS_DQ_FREEING; | 1687 | dqp->dq_flags |= XFS_DQ_FREEING; |
1690 | xfs_dqunlock(dqp); | 1688 | xfs_dqunlock(dqp); |
1691 | 1689 | ||
1692 | mutex_lock(&dqp->q_hash->qh_lock); | 1690 | mutex_lock(&dqp->q_hash->qh_lock); |
1693 | list_del_init(&dqp->q_hashlist); | 1691 | list_del_init(&dqp->q_hashlist); |
1694 | dqp->q_hash->qh_version++; | 1692 | dqp->q_hash->qh_version++; |
1695 | mutex_unlock(&dqp->q_hash->qh_lock); | 1693 | mutex_unlock(&dqp->q_hash->qh_lock); |
1696 | 1694 | ||
1697 | mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); | 1695 | mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); |
1698 | list_del_init(&dqp->q_mplist); | 1696 | list_del_init(&dqp->q_mplist); |
1699 | mp->m_quotainfo->qi_dquots--; | 1697 | mp->m_quotainfo->qi_dquots--; |
1700 | mp->m_quotainfo->qi_dqreclaims++; | 1698 | mp->m_quotainfo->qi_dqreclaims++; |
1701 | mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); | 1699 | mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); |
1702 | 1700 | ||
1703 | ASSERT(dqp->q_nrefs == 0); | 1701 | ASSERT(dqp->q_nrefs == 0); |
1704 | list_del_init(&dqp->q_freelist); | 1702 | list_del_init(&dqp->q_freelist); |
1705 | xfs_Gqm->qm_dqfrlist_cnt--; | 1703 | xfs_Gqm->qm_dqfrlist_cnt--; |
1706 | 1704 | ||
1707 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); | 1705 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); |
1708 | return dqp; | 1706 | return dqp; |
1709 | dqunlock: | 1707 | dqunlock: |
1710 | xfs_dqunlock(dqp); | 1708 | xfs_dqunlock(dqp); |
1711 | if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) | 1709 | if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) |
1712 | break; | 1710 | break; |
1713 | goto restart; | 1711 | goto restart; |
1714 | } | 1712 | } |
1715 | 1713 | ||
1716 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); | 1714 | mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); |
1717 | return NULL; | 1715 | return NULL; |
1718 | } | 1716 | } |
1719 | 1717 | ||
1720 | /* | 1718 | /* |
1721 | * Traverse the freelist of dquots and attempt to reclaim a maximum of | 1719 | * Traverse the freelist of dquots and attempt to reclaim a maximum of |
1722 | * 'howmany' dquots. This operation races with dqlookup(), and attempts to | 1720 | * 'howmany' dquots. This operation races with dqlookup(), and attempts to |
1723 | * favor the lookup function ... | 1721 | * favor the lookup function ... |
1724 | */ | 1722 | */ |
1725 | STATIC int | 1723 | STATIC int |
1726 | xfs_qm_shake_freelist( | 1724 | xfs_qm_shake_freelist( |
1727 | int howmany) | 1725 | int howmany) |
1728 | { | 1726 | { |
1729 | int nreclaimed = 0; | 1727 | int nreclaimed = 0; |
1730 | xfs_dquot_t *dqp; | 1728 | xfs_dquot_t *dqp; |
1731 | 1729 | ||
1732 | if (howmany <= 0) | 1730 | if (howmany <= 0) |
1733 | return 0; | 1731 | return 0; |
1734 | 1732 | ||
1735 | while (nreclaimed < howmany) { | 1733 | while (nreclaimed < howmany) { |
1736 | dqp = xfs_qm_dqreclaim_one(); | 1734 | dqp = xfs_qm_dqreclaim_one(); |
1737 | if (!dqp) | 1735 | if (!dqp) |
1738 | return nreclaimed; | 1736 | return nreclaimed; |
1739 | xfs_qm_dqdestroy(dqp); | 1737 | xfs_qm_dqdestroy(dqp); |
1740 | nreclaimed++; | 1738 | nreclaimed++; |
1741 | } | 1739 | } |
1742 | return nreclaimed; | 1740 | return nreclaimed; |
1743 | } | 1741 | } |
1744 | 1742 | ||
1745 | /* | 1743 | /* |
1746 | * The kmem_shake interface is invoked when memory is running low. | 1744 | * The kmem_shake interface is invoked when memory is running low. |
1747 | */ | 1745 | */ |
1748 | /* ARGSUSED */ | 1746 | /* ARGSUSED */ |
1749 | STATIC int | 1747 | STATIC int |
1750 | xfs_qm_shake( | 1748 | xfs_qm_shake( |
1751 | struct shrinker *shrink, | 1749 | struct shrinker *shrink, |
1752 | struct shrink_control *sc) | 1750 | struct shrink_control *sc) |
1753 | { | 1751 | { |
1754 | int ndqused, nfree, n; | 1752 | int ndqused, nfree, n; |
1755 | gfp_t gfp_mask = sc->gfp_mask; | 1753 | gfp_t gfp_mask = sc->gfp_mask; |
1756 | 1754 | ||
1757 | if (!kmem_shake_allow(gfp_mask)) | 1755 | if (!kmem_shake_allow(gfp_mask)) |
1758 | return 0; | 1756 | return 0; |
1759 | if (!xfs_Gqm) | 1757 | if (!xfs_Gqm) |
1760 | return 0; | 1758 | return 0; |
1761 | 1759 | ||
1762 | nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ | 1760 | nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ |
1763 | /* incore dquots in all f/s's */ | 1761 | /* incore dquots in all f/s's */ |
1764 | ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; | 1762 | ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; |
1765 | 1763 | ||
1766 | ASSERT(ndqused >= 0); | 1764 | ASSERT(ndqused >= 0); |
1767 | 1765 | ||
1768 | if (nfree <= ndqused && nfree < ndquot) | 1766 | if (nfree <= ndqused && nfree < ndquot) |
1769 | return 0; | 1767 | return 0; |
1770 | 1768 | ||
1771 | ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ | 1769 | ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ |
1772 | n = nfree - ndqused - ndquot; /* # over target */ | 1770 | n = nfree - ndqused - ndquot; /* # over target */ |
1773 | 1771 | ||
1774 | return xfs_qm_shake_freelist(MAX(nfree, n)); | 1772 | return xfs_qm_shake_freelist(MAX(nfree, n)); |
1775 | } | 1773 | } |
1776 | 1774 | ||
1777 | 1775 | ||
1778 | /*------------------------------------------------------------------*/ | 1776 | /*------------------------------------------------------------------*/ |
1779 | 1777 | ||
1780 | /* | 1778 | /* |
1781 | * Return a new incore dquot. Depending on the number of | 1779 | * Return a new incore dquot. Depending on the number of |
1782 | * dquots in the system, we either allocate a new one on the kernel heap, | 1780 | * dquots in the system, we either allocate a new one on the kernel heap, |
1783 | * or reclaim a free one. | 1781 | * or reclaim a free one. |
1784 | * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed | 1782 | * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed |
1785 | * to reclaim an existing one from the freelist. | 1783 | * to reclaim an existing one from the freelist. |
1786 | */ | 1784 | */ |
1787 | boolean_t | 1785 | boolean_t |
1788 | xfs_qm_dqalloc_incore( | 1786 | xfs_qm_dqalloc_incore( |
1789 | xfs_dquot_t **O_dqpp) | 1787 | xfs_dquot_t **O_dqpp) |
1790 | { | 1788 | { |
1791 | xfs_dquot_t *dqp; | 1789 | xfs_dquot_t *dqp; |
1792 | 1790 | ||
1793 | /* | 1791 | /* |
1794 | * Check against high water mark to see if we want to pop | 1792 | * Check against high water mark to see if we want to pop |
1795 | * a nincompoop dquot off the freelist. | 1793 | * a nincompoop dquot off the freelist. |
1796 | */ | 1794 | */ |
1797 | if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { | 1795 | if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { |
1798 | /* | 1796 | /* |
1799 | * Try to recycle a dquot from the freelist. | 1797 | * Try to recycle a dquot from the freelist. |
1800 | */ | 1798 | */ |
1801 | if ((dqp = xfs_qm_dqreclaim_one())) { | 1799 | if ((dqp = xfs_qm_dqreclaim_one())) { |
1802 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); | 1800 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); |
1803 | /* | 1801 | /* |
1804 | * Just zero the core here. The rest will get | 1802 | * Just zero the core here. The rest will get |
1805 | * reinitialized by caller. XXX we shouldn't even | 1803 | * reinitialized by caller. XXX we shouldn't even |
1806 | * do this zero ... | 1804 | * do this zero ... |
1807 | */ | 1805 | */ |
1808 | memset(&dqp->q_core, 0, sizeof(dqp->q_core)); | 1806 | memset(&dqp->q_core, 0, sizeof(dqp->q_core)); |
1809 | *O_dqpp = dqp; | 1807 | *O_dqpp = dqp; |
1810 | return B_FALSE; | 1808 | return B_FALSE; |
1811 | } | 1809 | } |
1812 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); | 1810 | XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); |
1813 | } | 1811 | } |
1814 | 1812 | ||
1815 | /* | 1813 | /* |
1816 | * Allocate a brand new dquot on the kernel heap and return it | 1814 | * Allocate a brand new dquot on the kernel heap and return it |
1817 | * to the caller to initialize. | 1815 | * to the caller to initialize. |
1818 | */ | 1816 | */ |
1819 | ASSERT(xfs_Gqm->qm_dqzone != NULL); | 1817 | ASSERT(xfs_Gqm->qm_dqzone != NULL); |
1820 | *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); | 1818 | *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); |
1821 | atomic_inc(&xfs_Gqm->qm_totaldquots); | 1819 | atomic_inc(&xfs_Gqm->qm_totaldquots); |
1822 | 1820 | ||
1823 | return B_TRUE; | 1821 | return B_TRUE; |
1824 | } | 1822 | } |
1825 | 1823 | ||
1826 | 1824 | ||
1827 | /* | 1825 | /* |
1828 | * Start a transaction and write the incore superblock changes to | 1826 | * Start a transaction and write the incore superblock changes to |
1829 | * disk. flags parameter indicates which fields have changed. | 1827 | * disk. flags parameter indicates which fields have changed. |
1830 | */ | 1828 | */ |
1831 | int | 1829 | int |
1832 | xfs_qm_write_sb_changes( | 1830 | xfs_qm_write_sb_changes( |
1833 | xfs_mount_t *mp, | 1831 | xfs_mount_t *mp, |
1834 | __int64_t flags) | 1832 | __int64_t flags) |
1835 | { | 1833 | { |
1836 | xfs_trans_t *tp; | 1834 | xfs_trans_t *tp; |
1837 | int error; | 1835 | int error; |
1838 | 1836 | ||
1839 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); | 1837 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); |
1840 | if ((error = xfs_trans_reserve(tp, 0, | 1838 | if ((error = xfs_trans_reserve(tp, 0, |
1841 | mp->m_sb.sb_sectsize + 128, 0, | 1839 | mp->m_sb.sb_sectsize + 128, 0, |
1842 | 0, | 1840 | 0, |
1843 | XFS_DEFAULT_LOG_COUNT))) { | 1841 | XFS_DEFAULT_LOG_COUNT))) { |
1844 | xfs_trans_cancel(tp, 0); | 1842 | xfs_trans_cancel(tp, 0); |
1845 | return error; | 1843 | return error; |
1846 | } | 1844 | } |
1847 | 1845 | ||
1848 | xfs_mod_sb(tp, flags); | 1846 | xfs_mod_sb(tp, flags); |
1849 | error = xfs_trans_commit(tp, 0); | 1847 | error = xfs_trans_commit(tp, 0); |
1850 | 1848 | ||
1851 | return error; | 1849 | return error; |
1852 | } | 1850 | } |
1853 | 1851 | ||
1854 | 1852 | ||
1855 | /* --------------- utility functions for vnodeops ---------------- */ | 1853 | /* --------------- utility functions for vnodeops ---------------- */ |
1856 | 1854 | ||
1857 | 1855 | ||
1858 | /* | 1856 | /* |
1859 | * Given an inode, a uid, gid and prid make sure that we have | 1857 | * Given an inode, a uid, gid and prid make sure that we have |
1860 | * allocated relevant dquot(s) on disk, and that we won't exceed inode | 1858 | * allocated relevant dquot(s) on disk, and that we won't exceed inode |
1861 | * quotas by creating this file. | 1859 | * quotas by creating this file. |
1862 | * This also attaches dquot(s) to the given inode after locking it, | 1860 | * This also attaches dquot(s) to the given inode after locking it, |
1863 | * and returns the dquots corresponding to the uid and/or gid. | 1861 | * and returns the dquots corresponding to the uid and/or gid. |
1864 | * | 1862 | * |
1865 | * in : inode (unlocked) | 1863 | * in : inode (unlocked) |
1866 | * out : udquot, gdquot with references taken and unlocked | 1864 | * out : udquot, gdquot with references taken and unlocked |
1867 | */ | 1865 | */ |
1868 | int | 1866 | int |
1869 | xfs_qm_vop_dqalloc( | 1867 | xfs_qm_vop_dqalloc( |
1870 | struct xfs_inode *ip, | 1868 | struct xfs_inode *ip, |
1871 | uid_t uid, | 1869 | uid_t uid, |
1872 | gid_t gid, | 1870 | gid_t gid, |
1873 | prid_t prid, | 1871 | prid_t prid, |
1874 | uint flags, | 1872 | uint flags, |
1875 | struct xfs_dquot **O_udqpp, | 1873 | struct xfs_dquot **O_udqpp, |
1876 | struct xfs_dquot **O_gdqpp) | 1874 | struct xfs_dquot **O_gdqpp) |
1877 | { | 1875 | { |
1878 | struct xfs_mount *mp = ip->i_mount; | 1876 | struct xfs_mount *mp = ip->i_mount; |
1879 | struct xfs_dquot *uq, *gq; | 1877 | struct xfs_dquot *uq, *gq; |
1880 | int error; | 1878 | int error; |
1881 | uint lockflags; | 1879 | uint lockflags; |
1882 | 1880 | ||
1883 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) | 1881 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
1884 | return 0; | 1882 | return 0; |
1885 | 1883 | ||
1886 | lockflags = XFS_ILOCK_EXCL; | 1884 | lockflags = XFS_ILOCK_EXCL; |
1887 | xfs_ilock(ip, lockflags); | 1885 | xfs_ilock(ip, lockflags); |
1888 | 1886 | ||
1889 | if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) | 1887 | if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) |
1890 | gid = ip->i_d.di_gid; | 1888 | gid = ip->i_d.di_gid; |
1891 | 1889 | ||
1892 | /* | 1890 | /* |
1893 | * Attach the dquot(s) to this inode, doing a dquot allocation | 1891 | * Attach the dquot(s) to this inode, doing a dquot allocation |
1894 | * if necessary. The dquot(s) will not be locked. | 1892 | * if necessary. The dquot(s) will not be locked. |
1895 | */ | 1893 | */ |
1896 | if (XFS_NOT_DQATTACHED(mp, ip)) { | 1894 | if (XFS_NOT_DQATTACHED(mp, ip)) { |
1897 | error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); | 1895 | error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); |
1898 | if (error) { | 1896 | if (error) { |
1899 | xfs_iunlock(ip, lockflags); | 1897 | xfs_iunlock(ip, lockflags); |
1900 | return error; | 1898 | return error; |
1901 | } | 1899 | } |
1902 | } | 1900 | } |
1903 | 1901 | ||
1904 | uq = gq = NULL; | 1902 | uq = gq = NULL; |
1905 | if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { | 1903 | if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { |
1906 | if (ip->i_d.di_uid != uid) { | 1904 | if (ip->i_d.di_uid != uid) { |
1907 | /* | 1905 | /* |
1908 | * What we need is the dquot that has this uid, and | 1906 | * What we need is the dquot that has this uid, and |
1909 | * if we send the inode to dqget, the uid of the inode | 1907 | * if we send the inode to dqget, the uid of the inode |
1910 | * takes priority over what's sent in the uid argument. | 1908 | * takes priority over what's sent in the uid argument. |
1911 | * We must unlock inode here before calling dqget if | 1909 | * We must unlock inode here before calling dqget if |
1912 | * we're not sending the inode, because otherwise | 1910 | * we're not sending the inode, because otherwise |
1913 | * we'll deadlock by doing trans_reserve while | 1911 | * we'll deadlock by doing trans_reserve while |
1914 | * holding ilock. | 1912 | * holding ilock. |
1915 | */ | 1913 | */ |
1916 | xfs_iunlock(ip, lockflags); | 1914 | xfs_iunlock(ip, lockflags); |
1917 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, | 1915 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, |
1918 | XFS_DQ_USER, | 1916 | XFS_DQ_USER, |
1919 | XFS_QMOPT_DQALLOC | | 1917 | XFS_QMOPT_DQALLOC | |
1920 | XFS_QMOPT_DOWARN, | 1918 | XFS_QMOPT_DOWARN, |
1921 | &uq))) { | 1919 | &uq))) { |
1922 | ASSERT(error != ENOENT); | 1920 | ASSERT(error != ENOENT); |
1923 | return error; | 1921 | return error; |
1924 | } | 1922 | } |
1925 | /* | 1923 | /* |
1926 | * Get the ilock in the right order. | 1924 | * Get the ilock in the right order. |
1927 | */ | 1925 | */ |
1928 | xfs_dqunlock(uq); | 1926 | xfs_dqunlock(uq); |
1929 | lockflags = XFS_ILOCK_SHARED; | 1927 | lockflags = XFS_ILOCK_SHARED; |
1930 | xfs_ilock(ip, lockflags); | 1928 | xfs_ilock(ip, lockflags); |
1931 | } else { | 1929 | } else { |
1932 | /* | 1930 | /* |
1933 | * Take an extra reference, because we'll return | 1931 | * Take an extra reference, because we'll return |
1934 | * this to caller | 1932 | * this to caller |
1935 | */ | 1933 | */ |
1936 | ASSERT(ip->i_udquot); | 1934 | ASSERT(ip->i_udquot); |
1937 | uq = xfs_qm_dqhold(ip->i_udquot); | 1935 | uq = xfs_qm_dqhold(ip->i_udquot); |
1938 | } | 1936 | } |
1939 | } | 1937 | } |
1940 | if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { | 1938 | if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { |
1941 | if (ip->i_d.di_gid != gid) { | 1939 | if (ip->i_d.di_gid != gid) { |
1942 | xfs_iunlock(ip, lockflags); | 1940 | xfs_iunlock(ip, lockflags); |
1943 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, | 1941 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, |
1944 | XFS_DQ_GROUP, | 1942 | XFS_DQ_GROUP, |
1945 | XFS_QMOPT_DQALLOC | | 1943 | XFS_QMOPT_DQALLOC | |
1946 | XFS_QMOPT_DOWARN, | 1944 | XFS_QMOPT_DOWARN, |
1947 | &gq))) { | 1945 | &gq))) { |
1948 | if (uq) | 1946 | if (uq) |
1949 | xfs_qm_dqrele(uq); | 1947 | xfs_qm_dqrele(uq); |
1950 | ASSERT(error != ENOENT); | 1948 | ASSERT(error != ENOENT); |
1951 | return error; | 1949 | return error; |
1952 | } | 1950 | } |
1953 | xfs_dqunlock(gq); | 1951 | xfs_dqunlock(gq); |
1954 | lockflags = XFS_ILOCK_SHARED; | 1952 | lockflags = XFS_ILOCK_SHARED; |
1955 | xfs_ilock(ip, lockflags); | 1953 | xfs_ilock(ip, lockflags); |
1956 | } else { | 1954 | } else { |
1957 | ASSERT(ip->i_gdquot); | 1955 | ASSERT(ip->i_gdquot); |
1958 | gq = xfs_qm_dqhold(ip->i_gdquot); | 1956 | gq = xfs_qm_dqhold(ip->i_gdquot); |
1959 | } | 1957 | } |
1960 | } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { | 1958 | } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { |
1961 | if (xfs_get_projid(ip) != prid) { | 1959 | if (xfs_get_projid(ip) != prid) { |
1962 | xfs_iunlock(ip, lockflags); | 1960 | xfs_iunlock(ip, lockflags); |
1963 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, | 1961 | if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, |
1964 | XFS_DQ_PROJ, | 1962 | XFS_DQ_PROJ, |
1965 | XFS_QMOPT_DQALLOC | | 1963 | XFS_QMOPT_DQALLOC | |
1966 | XFS_QMOPT_DOWARN, | 1964 | XFS_QMOPT_DOWARN, |
1967 | &gq))) { | 1965 | &gq))) { |
1968 | if (uq) | 1966 | if (uq) |
1969 | xfs_qm_dqrele(uq); | 1967 | xfs_qm_dqrele(uq); |
1970 | ASSERT(error != ENOENT); | 1968 | ASSERT(error != ENOENT); |
1971 | return (error); | 1969 | return (error); |
1972 | } | 1970 | } |
1973 | xfs_dqunlock(gq); | 1971 | xfs_dqunlock(gq); |
1974 | lockflags = XFS_ILOCK_SHARED; | 1972 | lockflags = XFS_ILOCK_SHARED; |
1975 | xfs_ilock(ip, lockflags); | 1973 | xfs_ilock(ip, lockflags); |
1976 | } else { | 1974 | } else { |
1977 | ASSERT(ip->i_gdquot); | 1975 | ASSERT(ip->i_gdquot); |
1978 | gq = xfs_qm_dqhold(ip->i_gdquot); | 1976 | gq = xfs_qm_dqhold(ip->i_gdquot); |
1979 | } | 1977 | } |
1980 | } | 1978 | } |
1981 | if (uq) | 1979 | if (uq) |
1982 | trace_xfs_dquot_dqalloc(ip); | 1980 | trace_xfs_dquot_dqalloc(ip); |
1983 | 1981 | ||
1984 | xfs_iunlock(ip, lockflags); | 1982 | xfs_iunlock(ip, lockflags); |
1985 | if (O_udqpp) | 1983 | if (O_udqpp) |
1986 | *O_udqpp = uq; | 1984 | *O_udqpp = uq; |
1987 | else if (uq) | 1985 | else if (uq) |
1988 | xfs_qm_dqrele(uq); | 1986 | xfs_qm_dqrele(uq); |
1989 | if (O_gdqpp) | 1987 | if (O_gdqpp) |
1990 | *O_gdqpp = gq; | 1988 | *O_gdqpp = gq; |
1991 | else if (gq) | 1989 | else if (gq) |
1992 | xfs_qm_dqrele(gq); | 1990 | xfs_qm_dqrele(gq); |
1993 | return 0; | 1991 | return 0; |
1994 | } | 1992 | } |
1995 | 1993 | ||
1996 | /* | 1994 | /* |
1997 | * Actually transfer ownership, and do dquot modifications. | 1995 | * Actually transfer ownership, and do dquot modifications. |
1998 | * These were already reserved. | 1996 | * These were already reserved. |
1999 | */ | 1997 | */ |
2000 | xfs_dquot_t * | 1998 | xfs_dquot_t * |
2001 | xfs_qm_vop_chown( | 1999 | xfs_qm_vop_chown( |
2002 | xfs_trans_t *tp, | 2000 | xfs_trans_t *tp, |
2003 | xfs_inode_t *ip, | 2001 | xfs_inode_t *ip, |
2004 | xfs_dquot_t **IO_olddq, | 2002 | xfs_dquot_t **IO_olddq, |
2005 | xfs_dquot_t *newdq) | 2003 | xfs_dquot_t *newdq) |
2006 | { | 2004 | { |
2007 | xfs_dquot_t *prevdq; | 2005 | xfs_dquot_t *prevdq; |
2008 | uint bfield = XFS_IS_REALTIME_INODE(ip) ? | 2006 | uint bfield = XFS_IS_REALTIME_INODE(ip) ? |
2009 | XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; | 2007 | XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; |
2010 | 2008 | ||
2011 | 2009 | ||
2012 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2010 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
2013 | ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); | 2011 | ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); |
2014 | 2012 | ||
2015 | /* old dquot */ | 2013 | /* old dquot */ |
2016 | prevdq = *IO_olddq; | 2014 | prevdq = *IO_olddq; |
2017 | ASSERT(prevdq); | 2015 | ASSERT(prevdq); |
2018 | ASSERT(prevdq != newdq); | 2016 | ASSERT(prevdq != newdq); |
2019 | 2017 | ||
2020 | xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); | 2018 | xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); |
2021 | xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1); | 2019 | xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1); |
2022 | 2020 | ||
2023 | /* the sparkling new dquot */ | 2021 | /* the sparkling new dquot */ |
2024 | xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); | 2022 | xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); |
2025 | xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); | 2023 | xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); |
2026 | 2024 | ||
2027 | /* | 2025 | /* |
2028 | * Take an extra reference, because the inode is going to keep | 2026 | * Take an extra reference, because the inode is going to keep |
2029 | * this dquot pointer even after the trans_commit. | 2027 | * this dquot pointer even after the trans_commit. |
2030 | */ | 2028 | */ |
2031 | *IO_olddq = xfs_qm_dqhold(newdq); | 2029 | *IO_olddq = xfs_qm_dqhold(newdq); |
2032 | 2030 | ||
2033 | return prevdq; | 2031 | return prevdq; |
2034 | } | 2032 | } |
2035 | 2033 | ||
2036 | /* | 2034 | /* |
2037 | * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID). | 2035 | * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID). |
2038 | */ | 2036 | */ |
2039 | int | 2037 | int |
2040 | xfs_qm_vop_chown_reserve( | 2038 | xfs_qm_vop_chown_reserve( |
2041 | xfs_trans_t *tp, | 2039 | xfs_trans_t *tp, |
2042 | xfs_inode_t *ip, | 2040 | xfs_inode_t *ip, |
2043 | xfs_dquot_t *udqp, | 2041 | xfs_dquot_t *udqp, |
2044 | xfs_dquot_t *gdqp, | 2042 | xfs_dquot_t *gdqp, |
2045 | uint flags) | 2043 | uint flags) |
2046 | { | 2044 | { |
2047 | xfs_mount_t *mp = ip->i_mount; | 2045 | xfs_mount_t *mp = ip->i_mount; |
2048 | uint delblks, blkflags, prjflags = 0; | 2046 | uint delblks, blkflags, prjflags = 0; |
2049 | xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; | 2047 | xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; |
2050 | int error; | 2048 | int error; |
2051 | 2049 | ||
2052 | 2050 | ||
2053 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | 2051 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
2054 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 2052 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
2055 | 2053 | ||
2056 | delblks = ip->i_delayed_blks; | 2054 | delblks = ip->i_delayed_blks; |
2057 | delblksudq = delblksgdq = unresudq = unresgdq = NULL; | 2055 | delblksudq = delblksgdq = unresudq = unresgdq = NULL; |
2058 | blkflags = XFS_IS_REALTIME_INODE(ip) ? | 2056 | blkflags = XFS_IS_REALTIME_INODE(ip) ? |
2059 | XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; | 2057 | XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; |
2060 | 2058 | ||
2061 | if (XFS_IS_UQUOTA_ON(mp) && udqp && | 2059 | if (XFS_IS_UQUOTA_ON(mp) && udqp && |
2062 | ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) { | 2060 | ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) { |
2063 | delblksudq = udqp; | 2061 | delblksudq = udqp; |
2064 | /* | 2062 | /* |
2065 | * If there are delayed allocation blocks, then we have to | 2063 | * If there are delayed allocation blocks, then we have to |
2066 | * unreserve those from the old dquot, and add them to the | 2064 | * unreserve those from the old dquot, and add them to the |
2067 | * new dquot. | 2065 | * new dquot. |
2068 | */ | 2066 | */ |
2069 | if (delblks) { | 2067 | if (delblks) { |
2070 | ASSERT(ip->i_udquot); | 2068 | ASSERT(ip->i_udquot); |
2071 | unresudq = ip->i_udquot; | 2069 | unresudq = ip->i_udquot; |
2072 | } | 2070 | } |
2073 | } | 2071 | } |
2074 | if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { | 2072 | if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { |
2075 | if (XFS_IS_PQUOTA_ON(ip->i_mount) && | 2073 | if (XFS_IS_PQUOTA_ON(ip->i_mount) && |
2076 | xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id)) | 2074 | xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id)) |
2077 | prjflags = XFS_QMOPT_ENOSPC; | 2075 | prjflags = XFS_QMOPT_ENOSPC; |
2078 | 2076 | ||
2079 | if (prjflags || | 2077 | if (prjflags || |
2080 | (XFS_IS_GQUOTA_ON(ip->i_mount) && | 2078 | (XFS_IS_GQUOTA_ON(ip->i_mount) && |
2081 | ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) { | 2079 | ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) { |
2082 | delblksgdq = gdqp; | 2080 | delblksgdq = gdqp; |
2083 | if (delblks) { | 2081 | if (delblks) { |
2084 | ASSERT(ip->i_gdquot); | 2082 | ASSERT(ip->i_gdquot); |
2085 | unresgdq = ip->i_gdquot; | 2083 | unresgdq = ip->i_gdquot; |
2086 | } | 2084 | } |
2087 | } | 2085 | } |
2088 | } | 2086 | } |
2089 | 2087 | ||
2090 | if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, | 2088 | if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, |
2091 | delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, | 2089 | delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, |
2092 | flags | blkflags | prjflags))) | 2090 | flags | blkflags | prjflags))) |
2093 | return (error); | 2091 | return (error); |
2094 | 2092 | ||
2095 | /* | 2093 | /* |
2096 | * Do the delayed blks reservations/unreservations now. Since, these | 2094 | * Do the delayed blks reservations/unreservations now. Since, these |
2097 | * are done without the help of a transaction, if a reservation fails | 2095 | * are done without the help of a transaction, if a reservation fails |
2098 | * its previous reservations won't be automatically undone by trans | 2096 | * its previous reservations won't be automatically undone by trans |
2099 | * code. So, we have to do it manually here. | 2097 | * code. So, we have to do it manually here. |
2100 | */ | 2098 | */ |
2101 | if (delblks) { | 2099 | if (delblks) { |
2102 | /* | 2100 | /* |
2103 | * Do the reservations first. Unreservation can't fail. | 2101 | * Do the reservations first. Unreservation can't fail. |
2104 | */ | 2102 | */ |
2105 | ASSERT(delblksudq || delblksgdq); | 2103 | ASSERT(delblksudq || delblksgdq); |
2106 | ASSERT(unresudq || unresgdq); | 2104 | ASSERT(unresudq || unresgdq); |
2107 | if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, | 2105 | if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, |
2108 | delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, | 2106 | delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, |
2109 | flags | blkflags | prjflags))) | 2107 | flags | blkflags | prjflags))) |
2110 | return (error); | 2108 | return (error); |
2111 | xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, | 2109 | xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, |
2112 | unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, | 2110 | unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, |
2113 | blkflags); | 2111 | blkflags); |
2114 | } | 2112 | } |
2115 | 2113 | ||
2116 | return (0); | 2114 | return (0); |
2117 | } | 2115 | } |
2118 | 2116 | ||
2119 | int | 2117 | int |
2120 | xfs_qm_vop_rename_dqattach( | 2118 | xfs_qm_vop_rename_dqattach( |
2121 | struct xfs_inode **i_tab) | 2119 | struct xfs_inode **i_tab) |
2122 | { | 2120 | { |
2123 | struct xfs_mount *mp = i_tab[0]->i_mount; | 2121 | struct xfs_mount *mp = i_tab[0]->i_mount; |
2124 | int i; | 2122 | int i; |
2125 | 2123 | ||
2126 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) | 2124 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
2127 | return 0; | 2125 | return 0; |
2128 | 2126 | ||
2129 | for (i = 0; (i < 4 && i_tab[i]); i++) { | 2127 | for (i = 0; (i < 4 && i_tab[i]); i++) { |
2130 | struct xfs_inode *ip = i_tab[i]; | 2128 | struct xfs_inode *ip = i_tab[i]; |
2131 | int error; | 2129 | int error; |
2132 | 2130 | ||
2133 | /* | 2131 | /* |
2134 | * Watch out for duplicate entries in the table. | 2132 | * Watch out for duplicate entries in the table. |
2135 | */ | 2133 | */ |
2136 | if (i == 0 || ip != i_tab[i-1]) { | 2134 | if (i == 0 || ip != i_tab[i-1]) { |
2137 | if (XFS_NOT_DQATTACHED(mp, ip)) { | 2135 | if (XFS_NOT_DQATTACHED(mp, ip)) { |
2138 | error = xfs_qm_dqattach(ip, 0); | 2136 | error = xfs_qm_dqattach(ip, 0); |
2139 | if (error) | 2137 | if (error) |
2140 | return error; | 2138 | return error; |
2141 | } | 2139 | } |
2142 | } | 2140 | } |
2143 | } | 2141 | } |
2144 | return 0; | 2142 | return 0; |
2145 | } | 2143 | } |
2146 | 2144 | ||
2147 | void | 2145 | void |
2148 | xfs_qm_vop_create_dqattach( | 2146 | xfs_qm_vop_create_dqattach( |
2149 | struct xfs_trans *tp, | 2147 | struct xfs_trans *tp, |
2150 | struct xfs_inode *ip, | 2148 | struct xfs_inode *ip, |
2151 | struct xfs_dquot *udqp, | 2149 | struct xfs_dquot *udqp, |
2152 | struct xfs_dquot *gdqp) | 2150 | struct xfs_dquot *gdqp) |
2153 | { | 2151 | { |
2154 | struct xfs_mount *mp = tp->t_mountp; | 2152 | struct xfs_mount *mp = tp->t_mountp; |
2155 | 2153 | ||
2156 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) | 2154 | if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) |
2157 | return; | 2155 | return; |
2158 | 2156 | ||
2159 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 2157 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
2160 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); | 2158 | ASSERT(XFS_IS_QUOTA_RUNNING(mp)); |
2161 | 2159 | ||
2162 | if (udqp) { | 2160 | if (udqp) { |
2163 | ASSERT(ip->i_udquot == NULL); | 2161 | ASSERT(ip->i_udquot == NULL); |
2164 | ASSERT(XFS_IS_UQUOTA_ON(mp)); | 2162 | ASSERT(XFS_IS_UQUOTA_ON(mp)); |
2165 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); | 2163 | ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); |
2166 | 2164 | ||
2167 | ip->i_udquot = xfs_qm_dqhold(udqp); | 2165 | ip->i_udquot = xfs_qm_dqhold(udqp); |
2168 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); | 2166 | xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); |
2169 | } | 2167 | } |
2170 | if (gdqp) { | 2168 | if (gdqp) { |
2171 | ASSERT(ip->i_gdquot == NULL); | 2169 | ASSERT(ip->i_gdquot == NULL); |
2172 | ASSERT(XFS_IS_OQUOTA_ON(mp)); | 2170 | ASSERT(XFS_IS_OQUOTA_ON(mp)); |
2173 | ASSERT((XFS_IS_GQUOTA_ON(mp) ? | 2171 | ASSERT((XFS_IS_GQUOTA_ON(mp) ? |
2174 | ip->i_d.di_gid : xfs_get_projid(ip)) == | 2172 | ip->i_d.di_gid : xfs_get_projid(ip)) == |
2175 | be32_to_cpu(gdqp->q_core.d_id)); | 2173 | be32_to_cpu(gdqp->q_core.d_id)); |
2176 | 2174 | ||
2177 | ip->i_gdquot = xfs_qm_dqhold(gdqp); | 2175 | ip->i_gdquot = xfs_qm_dqhold(gdqp); |
2178 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); | 2176 | xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); |
fs/xfs/xfs_quota.h
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | 2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | 3 | * All Rights Reserved. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | 6 | * modify it under the terms of the GNU General Public License as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it would be useful, | 9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | 15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #ifndef __XFS_QUOTA_H__ | 18 | #ifndef __XFS_QUOTA_H__ |
19 | #define __XFS_QUOTA_H__ | 19 | #define __XFS_QUOTA_H__ |
20 | 20 | ||
21 | struct xfs_trans; | 21 | struct xfs_trans; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * The ondisk form of a dquot structure. | 24 | * The ondisk form of a dquot structure. |
25 | */ | 25 | */ |
26 | #define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */ | 26 | #define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */ |
27 | #define XFS_DQUOT_VERSION (u_int8_t)0x01 /* latest version number */ | 27 | #define XFS_DQUOT_VERSION (u_int8_t)0x01 /* latest version number */ |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * uid_t and gid_t are hard-coded to 32 bits in the inode. | 30 | * uid_t and gid_t are hard-coded to 32 bits in the inode. |
31 | * Hence, an 'id' in a dquot is 32 bits.. | 31 | * Hence, an 'id' in a dquot is 32 bits.. |
32 | */ | 32 | */ |
33 | typedef __uint32_t xfs_dqid_t; | 33 | typedef __uint32_t xfs_dqid_t; |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Even though users may not have quota limits occupying all 64-bits, | 36 | * Even though users may not have quota limits occupying all 64-bits, |
37 | * they may need 64-bit accounting. Hence, 64-bit quota-counters, | 37 | * they may need 64-bit accounting. Hence, 64-bit quota-counters, |
38 | * and quota-limits. This is a waste in the common case, but hey ... | 38 | * and quota-limits. This is a waste in the common case, but hey ... |
39 | */ | 39 | */ |
40 | typedef __uint64_t xfs_qcnt_t; | 40 | typedef __uint64_t xfs_qcnt_t; |
41 | typedef __uint16_t xfs_qwarncnt_t; | 41 | typedef __uint16_t xfs_qwarncnt_t; |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * This is the main portion of the on-disk representation of quota | 44 | * This is the main portion of the on-disk representation of quota |
45 | * information for a user. This is the q_core of the xfs_dquot_t that | 45 | * information for a user. This is the q_core of the xfs_dquot_t that |
46 | * is kept in kernel memory. We pad this with some more expansion room | 46 | * is kept in kernel memory. We pad this with some more expansion room |
47 | * to construct the on disk structure. | 47 | * to construct the on disk structure. |
48 | */ | 48 | */ |
49 | typedef struct xfs_disk_dquot { | 49 | typedef struct xfs_disk_dquot { |
50 | __be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */ | 50 | __be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */ |
51 | __u8 d_version; /* dquot version */ | 51 | __u8 d_version; /* dquot version */ |
52 | __u8 d_flags; /* XFS_DQ_USER/PROJ/GROUP */ | 52 | __u8 d_flags; /* XFS_DQ_USER/PROJ/GROUP */ |
53 | __be32 d_id; /* user,project,group id */ | 53 | __be32 d_id; /* user,project,group id */ |
54 | __be64 d_blk_hardlimit;/* absolute limit on disk blks */ | 54 | __be64 d_blk_hardlimit;/* absolute limit on disk blks */ |
55 | __be64 d_blk_softlimit;/* preferred limit on disk blks */ | 55 | __be64 d_blk_softlimit;/* preferred limit on disk blks */ |
56 | __be64 d_ino_hardlimit;/* maximum # allocated inodes */ | 56 | __be64 d_ino_hardlimit;/* maximum # allocated inodes */ |
57 | __be64 d_ino_softlimit;/* preferred inode limit */ | 57 | __be64 d_ino_softlimit;/* preferred inode limit */ |
58 | __be64 d_bcount; /* disk blocks owned by the user */ | 58 | __be64 d_bcount; /* disk blocks owned by the user */ |
59 | __be64 d_icount; /* inodes owned by the user */ | 59 | __be64 d_icount; /* inodes owned by the user */ |
60 | __be32 d_itimer; /* zero if within inode limits if not, | 60 | __be32 d_itimer; /* zero if within inode limits if not, |
61 | this is when we refuse service */ | 61 | this is when we refuse service */ |
62 | __be32 d_btimer; /* similar to above; for disk blocks */ | 62 | __be32 d_btimer; /* similar to above; for disk blocks */ |
63 | __be16 d_iwarns; /* warnings issued wrt num inodes */ | 63 | __be16 d_iwarns; /* warnings issued wrt num inodes */ |
64 | __be16 d_bwarns; /* warnings issued wrt disk blocks */ | 64 | __be16 d_bwarns; /* warnings issued wrt disk blocks */ |
65 | __be32 d_pad0; /* 64 bit align */ | 65 | __be32 d_pad0; /* 64 bit align */ |
66 | __be64 d_rtb_hardlimit;/* absolute limit on realtime blks */ | 66 | __be64 d_rtb_hardlimit;/* absolute limit on realtime blks */ |
67 | __be64 d_rtb_softlimit;/* preferred limit on RT disk blks */ | 67 | __be64 d_rtb_softlimit;/* preferred limit on RT disk blks */ |
68 | __be64 d_rtbcount; /* realtime blocks owned */ | 68 | __be64 d_rtbcount; /* realtime blocks owned */ |
69 | __be32 d_rtbtimer; /* similar to above; for RT disk blocks */ | 69 | __be32 d_rtbtimer; /* similar to above; for RT disk blocks */ |
70 | __be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */ | 70 | __be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */ |
71 | __be16 d_pad; | 71 | __be16 d_pad; |
72 | } xfs_disk_dquot_t; | 72 | } xfs_disk_dquot_t; |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * This is what goes on disk. This is separated from the xfs_disk_dquot because | 75 | * This is what goes on disk. This is separated from the xfs_disk_dquot because |
76 | * carrying the unnecessary padding would be a waste of memory. | 76 | * carrying the unnecessary padding would be a waste of memory. |
77 | */ | 77 | */ |
78 | typedef struct xfs_dqblk { | 78 | typedef struct xfs_dqblk { |
79 | xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */ | 79 | xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */ |
80 | char dd_fill[32]; /* filling for posterity */ | 80 | char dd_fill[32]; /* filling for posterity */ |
81 | } xfs_dqblk_t; | 81 | } xfs_dqblk_t; |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * flags for q_flags field in the dquot. | 84 | * flags for q_flags field in the dquot. |
85 | */ | 85 | */ |
86 | #define XFS_DQ_USER 0x0001 /* a user quota */ | 86 | #define XFS_DQ_USER 0x0001 /* a user quota */ |
87 | #define XFS_DQ_PROJ 0x0002 /* project quota */ | 87 | #define XFS_DQ_PROJ 0x0002 /* project quota */ |
88 | #define XFS_DQ_GROUP 0x0004 /* a group quota */ | 88 | #define XFS_DQ_GROUP 0x0004 /* a group quota */ |
89 | #define XFS_DQ_DIRTY 0x0008 /* dquot is dirty */ | 89 | #define XFS_DQ_DIRTY 0x0008 /* dquot is dirty */ |
90 | #define XFS_DQ_FREEING 0x0010 /* dquot is beeing torn down */ | 90 | #define XFS_DQ_FREEING 0x0010 /* dquot is beeing torn down */ |
91 | 91 | ||
92 | #define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP) | 92 | #define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP) |
93 | 93 | ||
94 | #define XFS_DQ_FLAGS \ | 94 | #define XFS_DQ_FLAGS \ |
95 | { XFS_DQ_USER, "USER" }, \ | 95 | { XFS_DQ_USER, "USER" }, \ |
96 | { XFS_DQ_PROJ, "PROJ" }, \ | 96 | { XFS_DQ_PROJ, "PROJ" }, \ |
97 | { XFS_DQ_GROUP, "GROUP" }, \ | 97 | { XFS_DQ_GROUP, "GROUP" }, \ |
98 | { XFS_DQ_DIRTY, "DIRTY" }, \ | 98 | { XFS_DQ_DIRTY, "DIRTY" }, \ |
99 | { XFS_DQ_FREEING, "FREEING" } | 99 | { XFS_DQ_FREEING, "FREEING" } |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * In the worst case, when both user and group quotas are on, | 102 | * In the worst case, when both user and group quotas are on, |
103 | * we can have a max of three dquots changing in a single transaction. | 103 | * we can have a max of three dquots changing in a single transaction. |
104 | */ | 104 | */ |
105 | #define XFS_DQUOT_LOGRES(mp) (sizeof(xfs_disk_dquot_t) * 3) | 105 | #define XFS_DQUOT_LOGRES(mp) (sizeof(xfs_disk_dquot_t) * 3) |
106 | 106 | ||
107 | 107 | ||
108 | /* | 108 | /* |
109 | * These are the structures used to lay out dquots and quotaoff | 109 | * These are the structures used to lay out dquots and quotaoff |
110 | * records on the log. Quite similar to those of inodes. | 110 | * records on the log. Quite similar to those of inodes. |
111 | */ | 111 | */ |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * log format struct for dquots. | 114 | * log format struct for dquots. |
115 | * The first two fields must be the type and size fitting into | 115 | * The first two fields must be the type and size fitting into |
116 | * 32 bits : log_recovery code assumes that. | 116 | * 32 bits : log_recovery code assumes that. |
117 | */ | 117 | */ |
118 | typedef struct xfs_dq_logformat { | 118 | typedef struct xfs_dq_logformat { |
119 | __uint16_t qlf_type; /* dquot log item type */ | 119 | __uint16_t qlf_type; /* dquot log item type */ |
120 | __uint16_t qlf_size; /* size of this item */ | 120 | __uint16_t qlf_size; /* size of this item */ |
121 | xfs_dqid_t qlf_id; /* usr/grp/proj id : 32 bits */ | 121 | xfs_dqid_t qlf_id; /* usr/grp/proj id : 32 bits */ |
122 | __int64_t qlf_blkno; /* blkno of dquot buffer */ | 122 | __int64_t qlf_blkno; /* blkno of dquot buffer */ |
123 | __int32_t qlf_len; /* len of dquot buffer */ | 123 | __int32_t qlf_len; /* len of dquot buffer */ |
124 | __uint32_t qlf_boffset; /* off of dquot in buffer */ | 124 | __uint32_t qlf_boffset; /* off of dquot in buffer */ |
125 | } xfs_dq_logformat_t; | 125 | } xfs_dq_logformat_t; |
126 | 126 | ||
127 | /* | 127 | /* |
128 | * log format struct for QUOTAOFF records. | 128 | * log format struct for QUOTAOFF records. |
129 | * The first two fields must be the type and size fitting into | 129 | * The first two fields must be the type and size fitting into |
130 | * 32 bits : log_recovery code assumes that. | 130 | * 32 bits : log_recovery code assumes that. |
131 | * We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer | 131 | * We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer |
132 | * to the first and ensures that the first logitem is taken out of the AIL | 132 | * to the first and ensures that the first logitem is taken out of the AIL |
133 | * only when the last one is securely committed. | 133 | * only when the last one is securely committed. |
134 | */ | 134 | */ |
135 | typedef struct xfs_qoff_logformat { | 135 | typedef struct xfs_qoff_logformat { |
136 | unsigned short qf_type; /* quotaoff log item type */ | 136 | unsigned short qf_type; /* quotaoff log item type */ |
137 | unsigned short qf_size; /* size of this item */ | 137 | unsigned short qf_size; /* size of this item */ |
138 | unsigned int qf_flags; /* USR and/or GRP */ | 138 | unsigned int qf_flags; /* USR and/or GRP */ |
139 | char qf_pad[12]; /* padding for future */ | 139 | char qf_pad[12]; /* padding for future */ |
140 | } xfs_qoff_logformat_t; | 140 | } xfs_qoff_logformat_t; |
141 | 141 | ||
142 | 142 | ||
143 | /* | 143 | /* |
144 | * Disk quotas status in m_qflags, and also sb_qflags. 16 bits. | 144 | * Disk quotas status in m_qflags, and also sb_qflags. 16 bits. |
145 | */ | 145 | */ |
146 | #define XFS_UQUOTA_ACCT 0x0001 /* user quota accounting ON */ | 146 | #define XFS_UQUOTA_ACCT 0x0001 /* user quota accounting ON */ |
147 | #define XFS_UQUOTA_ENFD 0x0002 /* user quota limits enforced */ | 147 | #define XFS_UQUOTA_ENFD 0x0002 /* user quota limits enforced */ |
148 | #define XFS_UQUOTA_CHKD 0x0004 /* quotacheck run on usr quotas */ | 148 | #define XFS_UQUOTA_CHKD 0x0004 /* quotacheck run on usr quotas */ |
149 | #define XFS_PQUOTA_ACCT 0x0008 /* project quota accounting ON */ | 149 | #define XFS_PQUOTA_ACCT 0x0008 /* project quota accounting ON */ |
150 | #define XFS_OQUOTA_ENFD 0x0010 /* other (grp/prj) quota limits enforced */ | 150 | #define XFS_OQUOTA_ENFD 0x0010 /* other (grp/prj) quota limits enforced */ |
151 | #define XFS_OQUOTA_CHKD 0x0020 /* quotacheck run on other (grp/prj) quotas */ | 151 | #define XFS_OQUOTA_CHKD 0x0020 /* quotacheck run on other (grp/prj) quotas */ |
152 | #define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ | 152 | #define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * Quota Accounting/Enforcement flags | 155 | * Quota Accounting/Enforcement flags |
156 | */ | 156 | */ |
157 | #define XFS_ALL_QUOTA_ACCT \ | 157 | #define XFS_ALL_QUOTA_ACCT \ |
158 | (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT) | 158 | (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT) |
159 | #define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD) | 159 | #define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD) |
160 | #define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD) | 160 | #define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD) |
161 | 161 | ||
162 | #define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT) | 162 | #define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT) |
163 | #define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT) | 163 | #define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT) |
164 | #define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT) | 164 | #define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT) |
165 | #define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT) | 165 | #define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT) |
166 | #define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD) | 166 | #define XFS_IS_UQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_UQUOTA_ENFD) |
167 | #define XFS_IS_OQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_OQUOTA_ENFD) | 167 | #define XFS_IS_OQUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_OQUOTA_ENFD) |
168 | 168 | ||
169 | /* | 169 | /* |
170 | * Incore only flags for quotaoff - these bits get cleared when quota(s) | 170 | * Incore only flags for quotaoff - these bits get cleared when quota(s) |
171 | * are in the process of getting turned off. These flags are in m_qflags but | 171 | * are in the process of getting turned off. These flags are in m_qflags but |
172 | * never in sb_qflags. | 172 | * never in sb_qflags. |
173 | */ | 173 | */ |
174 | #define XFS_UQUOTA_ACTIVE 0x0100 /* uquotas are being turned off */ | 174 | #define XFS_UQUOTA_ACTIVE 0x0100 /* uquotas are being turned off */ |
175 | #define XFS_PQUOTA_ACTIVE 0x0200 /* pquotas are being turned off */ | 175 | #define XFS_PQUOTA_ACTIVE 0x0200 /* pquotas are being turned off */ |
176 | #define XFS_GQUOTA_ACTIVE 0x0400 /* gquotas are being turned off */ | 176 | #define XFS_GQUOTA_ACTIVE 0x0400 /* gquotas are being turned off */ |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees | 179 | * Checking XFS_IS_*QUOTA_ON() while holding any inode lock guarantees |
180 | * quota will be not be switched off as long as that inode lock is held. | 180 | * quota will be not be switched off as long as that inode lock is held. |
181 | */ | 181 | */ |
182 | #define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \ | 182 | #define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \ |
183 | XFS_GQUOTA_ACTIVE | \ | 183 | XFS_GQUOTA_ACTIVE | \ |
184 | XFS_PQUOTA_ACTIVE)) | 184 | XFS_PQUOTA_ACTIVE)) |
185 | #define XFS_IS_OQUOTA_ON(mp) ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \ | 185 | #define XFS_IS_OQUOTA_ON(mp) ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \ |
186 | XFS_PQUOTA_ACTIVE)) | 186 | XFS_PQUOTA_ACTIVE)) |
187 | #define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE) | 187 | #define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE) |
188 | #define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE) | 188 | #define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE) |
189 | #define XFS_IS_PQUOTA_ON(mp) ((mp)->m_qflags & XFS_PQUOTA_ACTIVE) | 189 | #define XFS_IS_PQUOTA_ON(mp) ((mp)->m_qflags & XFS_PQUOTA_ACTIVE) |
190 | 190 | ||
191 | /* | 191 | /* |
192 | * Flags to tell various functions what to do. Not all of these are meaningful | 192 | * Flags to tell various functions what to do. Not all of these are meaningful |
193 | * to a single function. None of these XFS_QMOPT_* flags are meant to have | 193 | * to a single function. None of these XFS_QMOPT_* flags are meant to have |
194 | * persistent values (ie. their values can and will change between versions) | 194 | * persistent values (ie. their values can and will change between versions) |
195 | */ | 195 | */ |
196 | #define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */ | 196 | #define XFS_QMOPT_DQALLOC 0x0000002 /* alloc dquot ondisk if needed */ |
197 | #define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */ | 197 | #define XFS_QMOPT_UQUOTA 0x0000004 /* user dquot requested */ |
198 | #define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */ | 198 | #define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */ |
199 | #define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */ | 199 | #define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */ |
200 | #define XFS_QMOPT_DQSUSER 0x0000020 /* don't cache super users dquot */ | ||
201 | #define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */ | 200 | #define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */ |
202 | #define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */ | 201 | #define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */ |
203 | #define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */ | 202 | #define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */ |
204 | #define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ | 203 | #define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */ |
205 | #define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */ | 204 | #define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */ |
206 | 205 | ||
207 | /* | 206 | /* |
208 | * flags to xfs_trans_mod_dquot to indicate which field needs to be | 207 | * flags to xfs_trans_mod_dquot to indicate which field needs to be |
209 | * modified. | 208 | * modified. |
210 | */ | 209 | */ |
211 | #define XFS_QMOPT_RES_REGBLKS 0x0010000 | 210 | #define XFS_QMOPT_RES_REGBLKS 0x0010000 |
212 | #define XFS_QMOPT_RES_RTBLKS 0x0020000 | 211 | #define XFS_QMOPT_RES_RTBLKS 0x0020000 |
213 | #define XFS_QMOPT_BCOUNT 0x0040000 | 212 | #define XFS_QMOPT_BCOUNT 0x0040000 |
214 | #define XFS_QMOPT_ICOUNT 0x0080000 | 213 | #define XFS_QMOPT_ICOUNT 0x0080000 |
215 | #define XFS_QMOPT_RTBCOUNT 0x0100000 | 214 | #define XFS_QMOPT_RTBCOUNT 0x0100000 |
216 | #define XFS_QMOPT_DELBCOUNT 0x0200000 | 215 | #define XFS_QMOPT_DELBCOUNT 0x0200000 |
217 | #define XFS_QMOPT_DELRTBCOUNT 0x0400000 | 216 | #define XFS_QMOPT_DELRTBCOUNT 0x0400000 |
218 | #define XFS_QMOPT_RES_INOS 0x0800000 | 217 | #define XFS_QMOPT_RES_INOS 0x0800000 |
219 | 218 | ||
220 | /* | 219 | /* |
221 | * flags for dqalloc. | 220 | * flags for dqalloc. |
222 | */ | 221 | */ |
223 | #define XFS_QMOPT_INHERIT 0x1000000 | 222 | #define XFS_QMOPT_INHERIT 0x1000000 |
224 | 223 | ||
225 | /* | 224 | /* |
226 | * flags to xfs_trans_mod_dquot. | 225 | * flags to xfs_trans_mod_dquot. |
227 | */ | 226 | */ |
228 | #define XFS_TRANS_DQ_RES_BLKS XFS_QMOPT_RES_REGBLKS | 227 | #define XFS_TRANS_DQ_RES_BLKS XFS_QMOPT_RES_REGBLKS |
229 | #define XFS_TRANS_DQ_RES_RTBLKS XFS_QMOPT_RES_RTBLKS | 228 | #define XFS_TRANS_DQ_RES_RTBLKS XFS_QMOPT_RES_RTBLKS |
230 | #define XFS_TRANS_DQ_RES_INOS XFS_QMOPT_RES_INOS | 229 | #define XFS_TRANS_DQ_RES_INOS XFS_QMOPT_RES_INOS |
231 | #define XFS_TRANS_DQ_BCOUNT XFS_QMOPT_BCOUNT | 230 | #define XFS_TRANS_DQ_BCOUNT XFS_QMOPT_BCOUNT |
232 | #define XFS_TRANS_DQ_DELBCOUNT XFS_QMOPT_DELBCOUNT | 231 | #define XFS_TRANS_DQ_DELBCOUNT XFS_QMOPT_DELBCOUNT |
233 | #define XFS_TRANS_DQ_ICOUNT XFS_QMOPT_ICOUNT | 232 | #define XFS_TRANS_DQ_ICOUNT XFS_QMOPT_ICOUNT |
234 | #define XFS_TRANS_DQ_RTBCOUNT XFS_QMOPT_RTBCOUNT | 233 | #define XFS_TRANS_DQ_RTBCOUNT XFS_QMOPT_RTBCOUNT |
235 | #define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT | 234 | #define XFS_TRANS_DQ_DELRTBCOUNT XFS_QMOPT_DELRTBCOUNT |
236 | 235 | ||
237 | 236 | ||
238 | #define XFS_QMOPT_QUOTALL \ | 237 | #define XFS_QMOPT_QUOTALL \ |
239 | (XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA) | 238 | (XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA) |
240 | #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS) | 239 | #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS) |
241 | 240 | ||
242 | #ifdef __KERNEL__ | 241 | #ifdef __KERNEL__ |
243 | /* | 242 | /* |
244 | * This check is done typically without holding the inode lock; | 243 | * This check is done typically without holding the inode lock; |
245 | * that may seem racy, but it is harmless in the context that it is used. | 244 | * that may seem racy, but it is harmless in the context that it is used. |
246 | * The inode cannot go inactive as long a reference is kept, and | 245 | * The inode cannot go inactive as long a reference is kept, and |
247 | * therefore if dquot(s) were attached, they'll stay consistent. | 246 | * therefore if dquot(s) were attached, they'll stay consistent. |
248 | * If, for example, the ownership of the inode changes while | 247 | * If, for example, the ownership of the inode changes while |
249 | * we didn't have the inode locked, the appropriate dquot(s) will be | 248 | * we didn't have the inode locked, the appropriate dquot(s) will be |
250 | * attached atomically. | 249 | * attached atomically. |
251 | */ | 250 | */ |
252 | #define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\ | 251 | #define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\ |
253 | (ip)->i_udquot == NULL) || \ | 252 | (ip)->i_udquot == NULL) || \ |
254 | (XFS_IS_OQUOTA_ON(mp) && \ | 253 | (XFS_IS_OQUOTA_ON(mp) && \ |
255 | (ip)->i_gdquot == NULL)) | 254 | (ip)->i_gdquot == NULL)) |
256 | 255 | ||
257 | #define XFS_QM_NEED_QUOTACHECK(mp) \ | 256 | #define XFS_QM_NEED_QUOTACHECK(mp) \ |
258 | ((XFS_IS_UQUOTA_ON(mp) && \ | 257 | ((XFS_IS_UQUOTA_ON(mp) && \ |
259 | (mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \ | 258 | (mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \ |
260 | (XFS_IS_GQUOTA_ON(mp) && \ | 259 | (XFS_IS_GQUOTA_ON(mp) && \ |
261 | ((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \ | 260 | ((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \ |
262 | (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT))) || \ | 261 | (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT))) || \ |
263 | (XFS_IS_PQUOTA_ON(mp) && \ | 262 | (XFS_IS_PQUOTA_ON(mp) && \ |
264 | ((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \ | 263 | ((mp->m_sb.sb_qflags & XFS_OQUOTA_CHKD) == 0 || \ |
265 | (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT)))) | 264 | (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT)))) |
266 | 265 | ||
267 | #define XFS_MOUNT_QUOTA_SET1 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ | 266 | #define XFS_MOUNT_QUOTA_SET1 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ |
268 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ | 267 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ |
269 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD) | 268 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD) |
270 | 269 | ||
271 | #define XFS_MOUNT_QUOTA_SET2 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ | 270 | #define XFS_MOUNT_QUOTA_SET2 (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ |
272 | XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\ | 271 | XFS_UQUOTA_CHKD|XFS_GQUOTA_ACCT|\ |
273 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD) | 272 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD) |
274 | 273 | ||
275 | #define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ | 274 | #define XFS_MOUNT_QUOTA_ALL (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD|\ |
276 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ | 275 | XFS_UQUOTA_CHKD|XFS_PQUOTA_ACCT|\ |
277 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD|\ | 276 | XFS_OQUOTA_ENFD|XFS_OQUOTA_CHKD|\ |
278 | XFS_GQUOTA_ACCT) | 277 | XFS_GQUOTA_ACCT) |
279 | 278 | ||
280 | 279 | ||
/*
 * The structure kept inside the xfs_trans_t keeps track of dquot changes
 * made within a transaction so that they can be applied later, when the
 * transaction commits.
 */
typedef struct xfs_dqtrx {
	struct xfs_dquot *qt_dquot;	  /* the dquot this refers to */
	ulong		qt_blk_res;	  /* blks reserved on a dquot */
	ulong		qt_blk_res_used;  /* blks used from the reservation */
	ulong		qt_ino_res;	  /* inode reserved on a dquot */
	ulong		qt_ino_res_used;  /* inodes used from the reservation */
	long		qt_bcount_delta;  /* dquot blk count changes */
	long		qt_delbcnt_delta; /* delayed dquot blk count changes */
	long		qt_icount_delta;  /* dquot inode count changes */
	ulong		qt_rtblk_res;	  /* # blks reserved on a dquot */
	ulong		qt_rtblk_res_used;/* # blks used from reservation */
	long		qt_rtbcount_delta;/* dquot realtime blk changes */
	long		qt_delrtb_delta;  /* delayed RT blk count changes */
} xfs_dqtrx_t;
299 | 298 | ||
#ifdef CONFIG_XFS_QUOTA
/* Quota support built in: real implementations of the quota interface. */
extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *);
extern void xfs_trans_free_dqinfo(struct xfs_trans *);
extern void xfs_trans_mod_dquot_byino(struct xfs_trans *, struct xfs_inode *,
		uint, long);
extern void xfs_trans_apply_dquot_deltas(struct xfs_trans *);
extern void xfs_trans_unreserve_and_mod_dquots(struct xfs_trans *);
extern int xfs_trans_reserve_quota_nblks(struct xfs_trans *,
		struct xfs_inode *, long, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
		struct xfs_mount *, struct xfs_dquot *,
		struct xfs_dquot *, long, long, uint);

extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint,
		struct xfs_dquot **, struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
		struct xfs_dquot *, struct xfs_dquot *);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
		struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
		struct xfs_dquot *, struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *, uint);
extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
extern void xfs_qm_dqdetach(struct xfs_inode *);
extern void xfs_qm_dqrele(struct xfs_dquot *);
extern void xfs_qm_statvfs(struct xfs_inode *, struct kstatfs *);
extern int xfs_qm_newmount(struct xfs_mount *, uint *, uint *);
extern void xfs_qm_mount_quotas(struct xfs_mount *);
extern void xfs_qm_unmount(struct xfs_mount *);
extern void xfs_qm_unmount_quotas(struct xfs_mount *);

#else
/*
 * Quota support compiled out: no-op stubs so callers never need #ifdefs.
 * Status-returning entry points report success (0); xfs_qm_vop_dqalloc
 * still NULLs both output dquot pointers so callers see a consistent
 * "no dquots attached" result.
 */
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
		uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp)
{
	*udqp = NULL;
	*gdqp = NULL;
	return 0;
}
#define xfs_trans_dup_dqinfo(tp, tp2)
#define xfs_trans_free_dqinfo(tp)
#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
#define xfs_trans_apply_dquot_deltas(tp)
#define xfs_trans_unreserve_and_mod_dquots(tp)
static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
		struct xfs_inode *ip, long nblks, long ninos, uint flags)
{
	return 0;
}
static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
		struct xfs_mount *mp, struct xfs_dquot *udqp,
		struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
{
	return 0;
}
#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
#define xfs_qm_vop_rename_dqattach(it)					(0)
#define xfs_qm_vop_chown(tp, ip, old, new)				(NULL)
#define xfs_qm_vop_chown_reserve(tp, ip, u, g, fl)			(0)
#define xfs_qm_dqattach(ip, fl)						(0)
#define xfs_qm_dqattach_locked(ip, fl)					(0)
#define xfs_qm_dqdetach(ip)
#define xfs_qm_dqrele(d)
#define xfs_qm_statvfs(ip, s)
#define xfs_qm_newmount(mp, a, b)					(0)
#define xfs_qm_mount_quotas(mp)
#define xfs_qm_unmount(mp)
#define xfs_qm_unmount_quotas(mp)
#endif /* CONFIG_XFS_QUOTA */
371 | 370 | ||
/* Unreserving is just reserving with negated block/inode counts. */
#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
	xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
/* Reserve regular (non-realtime) filesystem blocks against the dquots. */
#define xfs_trans_reserve_quota(tp, mp, ud, gd, nb, ni, f) \
	xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \
				f | XFS_QMOPT_RES_REGBLKS)
377 | 376 | ||
/* Declared unconditionally: available with or without CONFIG_XFS_QUOTA. */
extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
				xfs_dqid_t, uint, uint, char *);
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
381 | 380 | ||
382 | #endif /* __KERNEL__ */ | 381 | #endif /* __KERNEL__ */ |
383 | #endif /* __XFS_QUOTA_H__ */ | 382 | #endif /* __XFS_QUOTA_H__ */ |
384 | 383 |