Blame view
fs/xfs/xfs_trans_dquot.c
21.9 KB
1da177e4c
|
1 |
/* |
4ce3121f6
|
2 3 |
* Copyright (c) 2000-2002 Silicon Graphics, Inc. * All Rights Reserved. |
1da177e4c
|
4 |
* |
4ce3121f6
|
5 6 |
* This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as |
1da177e4c
|
7 8 |
* published by the Free Software Foundation. * |
4ce3121f6
|
9 10 11 12 |
* This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. |
1da177e4c
|
13 |
* |
4ce3121f6
|
14 15 16 |
* You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
1da177e4c
|
17 |
*/ |
1da177e4c
|
18 19 |
#include "xfs.h" #include "xfs_fs.h" |
70a9883c5
|
20 |
#include "xfs_shared.h" |
239880ef6
|
21 22 23 |
#include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" |
1da177e4c
|
24 |
#include "xfs_mount.h" |
1da177e4c
|
25 |
#include "xfs_inode.h" |
1da177e4c
|
26 |
#include "xfs_error.h" |
239880ef6
|
27 |
#include "xfs_trans.h" |
1da177e4c
|
28 |
#include "xfs_trans_priv.h" |
a4fbe6ab1
|
29 |
#include "xfs_quota.h" |
1da177e4c
|
30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
#include "xfs_qm.h"

STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp->q_transp != tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);

	/*
	 * Initialize q_transp so we can later determine if this dquot is
	 * associated with this transaction.
	 */
	dqp->q_transp = tp;
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty.  In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty.  However, there's nothing stopping this from getting
 * called directly, as done by xfs_qm_scall_setqlim.  Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp->q_transp == tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
7d095257e
|
86 |
void
xfs_trans_dup_dqinfo(
	xfs_trans_t	*otp,
	xfs_trans_t	*ntp)
{
	xfs_dqtrx_t	*oq, *nq;
	int		i, j;
	xfs_dqtrx_t	*oqa, *nqa;
	ulong		blk_res_used;

	/* Nothing to carry forward if the old transaction had no dquots. */
	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			/* The dqtrx array is filled sequentially. */
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			/* Only positive block deltas consume reservation. */
			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations:
			 * the new transaction gets the unused part, the old
			 * one keeps only what it already consumed.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}

/*
 * Wrap around mod_dquot to account for both user and group quotas.
 */
7d095257e
|
142 |
void |
1da177e4c
|
143 144 145 146 147 148 |
xfs_trans_mod_dquot_byino( xfs_trans_t *tp, xfs_inode_t *ip, uint field, long delta) { |
7d095257e
|
149 |
xfs_mount_t *mp = tp->t_mountp; |
1da177e4c
|
150 |
|
7d095257e
|
151 152 |
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp) || |
9cad19d2c
|
153 |
xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) |
1da177e4c
|
154 155 156 157 |
return; if (tp->t_dqinfo == NULL) xfs_trans_alloc_dqinfo(tp); |
c8ad20ffe
|
158 |
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) |
1da177e4c
|
159 |
(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); |
92f8ff73f
|
160 |
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) |
1da177e4c
|
161 |
(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); |
92f8ff73f
|
162 163 |
if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot) (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta); |
1da177e4c
|
164 |
} |
113a56835
|
165 |
/*
 * Find the dqtrx slot for this dquot in the transaction's per-type
 * array: either the slot already holding the dquot, or the first
 * free slot.  Returns NULL if the dquot type is unknown or the
 * array is full.
 */
STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	if (XFS_QM_ISUDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
	else if (XFS_QM_ISGDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
	else if (XFS_QM_ISPDQ(dqp))
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
	else
		return NULL;

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp,
	uint		field,
	long		delta)
{
	xfs_dqtrx_t	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {

		/*
		 * regular disk blk reservation
		 */
	      case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += (ulong)delta;
		break;

		/*
		 * inode reservation
		 */
	      case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += (ulong)delta;
		break;

		/*
		 * disk blocks used.
		 */
	      case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	      case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

		/*
		 * Inode Count
		 */
	      case XFS_TRANS_DQ_ICOUNT:
		/* Positive deltas consume any prior inode reservation. */
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += (ulong)delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

		/*
		 * rtblk reservation
		 */
	      case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += (ulong)delta;
		break;

		/*
		 * rtblk count
		 */
	      case XFS_TRANS_DQ_RTBCOUNT:
		/* Positive deltas consume any prior rtblk reservation. */
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	      case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	      default:
		ASSERT(0);
	}
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}

/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	xfs_trans_t	*tp,
	xfs_dqtrx_t	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		/* xfs_dqlock2() takes both locks in a deadlock-safe order. */
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
7d095257e
|
319 |
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	struct xfs_disk_dquot	*d;
	long			totalbdelta;
	long			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));
			ASSERT(dqp->q_transp == tp);

			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
#endif
			/* Fold the deltas into the on-disk (big-endian) counters. */
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 * (d_id == 0 is the default-limits dquot; skip it.)
			 */
			if (d->d_id) {
				xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			}

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				ulong blk_res_used = 0;

				if (qtrx->qt_bcount_delta > 0)
					blk_res_used = qtrx->qt_bcount_delta;

				if (qtrx->qt_blk_res != blk_res_used) {
					if (qtrx->qt_blk_res > blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation). Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
						(xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res -
							 qtrx->qt_rtblk_res_used);
					else
						dqp->q_res_rtbcount -= (xfs_qcnt_t)
							(qtrx->qt_rtblk_res_used -
							 qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
						(xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}

			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
						(xfs_qcnt_t)qtrx->qt_icount_delta;
			}

			/* Reserved counts can never drop below actual usage. */
			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		}
	}
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
7d095257e
|
500 |
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t	*tp)
{
	int		i, j;
	xfs_dquot_t	*dqp;
	xfs_dqtrx_t	*qtrx, *qa;
	bool		locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 * The dquot is locked lazily: only once we know at
			 * least one reservation needs undoing.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
	}
}
a210c1aa7
|
557 558 559 560 561 562 |
STATIC void xfs_quota_warn( struct xfs_mount *mp, struct xfs_dquot *dqp, int type) { |
ffc671f1e
|
563 |
enum quota_type qtype; |
a210c1aa7
|
564 |
if (dqp->dq_flags & XFS_DQ_PROJ) |
ffc671f1e
|
565 566 567 568 569 570 571 |
qtype = PRJQUOTA; else if (dqp->dq_flags & XFS_DQ_USER) qtype = USRQUOTA; else qtype = GRPQUOTA; quota_send_warning(make_kqid(&init_user_ns, qtype, |
431f19744
|
572 573 |
be32_to_cpu(dqp->q_core.d_id)), mp->m_super->s_dev, type); |
a210c1aa7
|
574 |
} |
1da177e4c
|
575 576 577 578 579 |
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;
	time_t			timer;
	xfs_qwarncnt_t		warns;
	xfs_qwarncnt_t		warnlimit;
	xfs_qcnt_t		total_count;
	xfs_qcnt_t		*resbcountp;
	xfs_quotainfo_t		*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;


	xfs_dqlock(dqp);

	defq = xfs_get_defquota(dqp, q);

	/*
	 * Pick the set of limits/timers/counters to enforce against:
	 * regular disk blocks or realtime blocks.  Zero limits in the
	 * dquot fall back to the mount-wide defaults.
	 */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = defq->bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = defq->bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = defq->rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = defq->rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	/*
	 * Enforce limits only when not forced, not the default dquot
	 * (d_id == 0), and enforcement is on for this quota type.
	 */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			total_count = *resbcountp + nblks;
			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}

				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			/* Reuse the limit locals for the inode checks. */
			total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = defq->ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = defq->isoftlimit;

			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if  ((timer != 0 && get_seconds() > timer) ||
				     (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (flags & XFS_QMOPT_ENOSPC)
		return -ENOSPC;
	return -EDQUOT;
}

/*
9a2a7de26
|
725 |
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows a all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	long			nblks,
	long			ninos,
	uint			flags)
{
	int		error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	if (tp && tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		/* User quota over-limit reports EDQUOT, never ENOSPC. */
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
					(flags & ~XFS_QMOPT_ENOSPC));
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

	/*
	 * On failure, back out earlier reservations with negative deltas;
	 * FORCE_RES guarantees the unwind itself cannot fail.
	 */
unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
1da177e4c
|
795 |
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	long			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;
	/* Project quota over-limit is reported as ENOSPC, not EDQUOT. */
	if (XFS_IS_PQUOTA_ON(mp))
		flags |= XFS_QMOPT_ENOSPC;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
				XFS_TRANS_DQ_RES_RTBLKS ||
	       (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
				XFS_TRANS_DQ_RES_BLKS);

	/*
	 * Reserve nblks against these dquots, with trans as the mediator.
	 */
	return xfs_trans_reserve_quota_bydquots(tp, mp,
						ip->i_udquot, ip->i_gdquot,
						ip->i_pdquot,
						nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
	xfs_trans_t		*tp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_qoff_logitem_t	*q;

	ASSERT(tp != NULL);

	q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(q != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &q->qql_item);
	return q;
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	xfs_trans_t		*tp,
	xfs_qoff_logitem_t	*qlp)
{
	tp->t_flags |= XFS_TRANS_DIRTY;
	qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}

/* Lazily allocate the per-transaction dquot bookkeeping structure. */
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP);
}

/* Free the per-transaction dquot bookkeeping structure, if any. */
void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}