Commit e285c100362762f7440643be637dd332460fdc75
1 parent
113d6b3c99
Exists in
master
and in
7 other branches
GFS2: Add set_xquota support
This patch adds the ability to set GFS2 quota limit and warning levels via the XFS quota API.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Showing 1 changed file with 172 additions and 26 deletions. (Side-by-side diff)
fs/gfs2/quota.c
... | ... | @@ -615,8 +615,9 @@ |
615 | 615 | * gfs2_adjust_quota - adjust record of current block usage |
616 | 616 | * @ip: The quota inode |
617 | 617 | * @loc: Offset of the entry in the quota file |
618 | - * @change: The amount of change to record | |
618 | + * @change: The amount of usage change to record | |
619 | 619 | * @qd: The quota data |
620 | + * @fdq: The updated limits to record | |
620 | 621 | * |
621 | 622 | * This function was mostly borrowed from gfs2_block_truncate_page which was |
622 | 623 | * in turn mostly borrowed from ext3 |
623 | 624 | |
624 | 625 | |
... | ... | @@ -625,19 +626,21 @@ |
625 | 626 | */ |
626 | 627 | |
627 | 628 | static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, |
628 | - s64 change, struct gfs2_quota_data *qd) | |
629 | + s64 change, struct gfs2_quota_data *qd, | |
630 | + struct fs_disk_quota *fdq) | |
629 | 631 | { |
630 | 632 | struct inode *inode = &ip->i_inode; |
631 | 633 | struct address_space *mapping = inode->i_mapping; |
632 | 634 | unsigned long index = loc >> PAGE_CACHE_SHIFT; |
633 | 635 | unsigned offset = loc & (PAGE_CACHE_SIZE - 1); |
634 | 636 | unsigned blocksize, iblock, pos; |
635 | - struct buffer_head *bh; | |
637 | + struct buffer_head *bh, *dibh; | |
636 | 638 | struct page *page; |
637 | 639 | void *kaddr; |
638 | 640 | struct gfs2_quota *qp; |
639 | 641 | s64 value; |
640 | 642 | int err = -EIO; |
643 | + u64 size; | |
641 | 644 | |
642 | 645 | if (gfs2_is_stuffed(ip)) |
643 | 646 | gfs2_unstuff_dinode(ip, NULL); |
644 | 647 | |
... | ... | @@ -683,9 +686,34 @@ |
683 | 686 | value = (s64)be64_to_cpu(qp->qu_value) + change; |
684 | 687 | qp->qu_value = cpu_to_be64(value); |
685 | 688 | qd->qd_qb.qb_value = qp->qu_value; |
689 | + if (fdq) { | |
690 | + if (fdq->d_fieldmask & FS_DQ_BSOFT) { | |
691 | + qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); | |
692 | + qd->qd_qb.qb_warn = qp->qu_warn; | |
693 | + } | |
694 | + if (fdq->d_fieldmask & FS_DQ_BHARD) { | |
695 | + qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); | |
696 | + qd->qd_qb.qb_limit = qp->qu_limit; | |
697 | + } | |
698 | + } | |
686 | 699 | flush_dcache_page(page); |
687 | 700 | kunmap_atomic(kaddr, KM_USER0); |
688 | - err = 0; | |
701 | + | |
702 | + err = gfs2_meta_inode_buffer(ip, &dibh); | |
703 | + if (err) | |
704 | + goto unlock; | |
705 | + | |
706 | + size = loc + sizeof(struct gfs2_quota); | |
707 | + if (size > inode->i_size) { | |
708 | + ip->i_disksize = size; | |
709 | + i_size_write(inode, size); | |
710 | + } | |
711 | + inode->i_mtime = inode->i_atime = CURRENT_TIME; | |
712 | + gfs2_trans_add_bh(ip->i_gl, dibh, 1); | |
713 | + gfs2_dinode_out(ip, dibh->b_data); | |
714 | + brelse(dibh); | |
715 | + mark_inode_dirty(inode); | |
716 | + | |
689 | 717 | unlock: |
690 | 718 | unlock_page(page); |
691 | 719 | page_cache_release(page); |
... | ... | @@ -713,6 +741,7 @@ |
713 | 741 | return -ENOMEM; |
714 | 742 | |
715 | 743 | sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); |
744 | + mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA); | |
716 | 745 | for (qx = 0; qx < num_qd; qx++) { |
717 | 746 | error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, |
718 | 747 | GL_NOCACHE, &ghs[qx]); |
... | ... | @@ -768,8 +797,7 @@ |
768 | 797 | for (x = 0; x < num_qd; x++) { |
769 | 798 | qd = qda[x]; |
770 | 799 | offset = qd2offset(qd); |
771 | - error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, | |
772 | - (struct gfs2_quota_data *)qd); | |
800 | + error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL); | |
773 | 801 | if (error) |
774 | 802 | goto out_end_trans; |
775 | 803 | |
776 | 804 | |
777 | 805 | |
778 | 806 | |
... | ... | @@ -789,20 +817,44 @@ |
789 | 817 | out: |
790 | 818 | while (qx--) |
791 | 819 | gfs2_glock_dq_uninit(&ghs[qx]); |
820 | + mutex_unlock(&ip->i_inode.i_mutex); | |
792 | 821 | kfree(ghs); |
793 | 822 | gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); |
794 | 823 | return error; |
795 | 824 | } |
796 | 825 | |
826 | +static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) | |
827 | +{ | |
828 | + struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); | |
829 | + struct gfs2_quota q; | |
830 | + struct gfs2_quota_lvb *qlvb; | |
831 | + loff_t pos; | |
832 | + int error; | |
833 | + | |
834 | + memset(&q, 0, sizeof(struct gfs2_quota)); | |
835 | + pos = qd2offset(qd); | |
836 | + error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q)); | |
837 | + if (error < 0) | |
838 | + return error; | |
839 | + | |
840 | + qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | |
841 | + qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); | |
842 | + qlvb->__pad = 0; | |
843 | + qlvb->qb_limit = q.qu_limit; | |
844 | + qlvb->qb_warn = q.qu_warn; | |
845 | + qlvb->qb_value = q.qu_value; | |
846 | + qd->qd_qb = *qlvb; | |
847 | + | |
848 | + return 0; | |
849 | +} | |
850 | + | |
797 | 851 | static int do_glock(struct gfs2_quota_data *qd, int force_refresh, |
798 | 852 | struct gfs2_holder *q_gh) |
799 | 853 | { |
800 | 854 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; |
801 | 855 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); |
802 | 856 | struct gfs2_holder i_gh; |
803 | - struct gfs2_quota q; | |
804 | 857 | int error; |
805 | - struct gfs2_quota_lvb *qlvb; | |
806 | 858 | |
807 | 859 | restart: |
808 | 860 | error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); |
... | ... | @@ -812,7 +864,6 @@ |
812 | 864 | qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; |
813 | 865 | |
814 | 866 | if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { |
815 | - loff_t pos; | |
816 | 867 | gfs2_glock_dq_uninit(q_gh); |
817 | 868 | error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, |
818 | 869 | GL_NOCACHE, q_gh); |
819 | 870 | |
820 | 871 | |
... | ... | @@ -823,25 +874,11 @@ |
823 | 874 | if (error) |
824 | 875 | goto fail; |
825 | 876 | |
826 | - memset(&q, 0, sizeof(struct gfs2_quota)); | |
827 | - pos = qd2offset(qd); | |
828 | - error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q)); | |
829 | - if (error < 0) | |
877 | + error = update_qd(sdp, qd); | |
878 | + if (error) | |
830 | 879 | goto fail_gunlock; |
831 | - if ((error < sizeof(q)) && force_refresh) { | |
832 | - error = -ENOENT; | |
833 | - goto fail_gunlock; | |
834 | - } | |
835 | - gfs2_glock_dq_uninit(&i_gh); | |
836 | 880 | |
837 | - qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | |
838 | - qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); | |
839 | - qlvb->__pad = 0; | |
840 | - qlvb->qb_limit = q.qu_limit; | |
841 | - qlvb->qb_warn = q.qu_warn; | |
842 | - qlvb->qb_value = q.qu_value; | |
843 | - qd->qd_qb = *qlvb; | |
844 | - | |
881 | + gfs2_glock_dq_uninit(&i_gh); | |
845 | 882 | gfs2_glock_dq_uninit(q_gh); |
846 | 883 | force_refresh = 0; |
847 | 884 | goto restart; |
848 | 885 | |
... | ... | @@ -1409,9 +1446,118 @@ |
1409 | 1446 | return error; |
1410 | 1447 | } |
1411 | 1448 | |
1449 | +/* GFS2 only supports a subset of the XFS fields */ | |
1450 | +#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD) | |
1451 | + | |
1452 | +static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id, | |
1453 | + struct fs_disk_quota *fdq) | |
1454 | +{ | |
1455 | + struct gfs2_sbd *sdp = sb->s_fs_info; | |
1456 | + struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); | |
1457 | + struct gfs2_quota_data *qd; | |
1458 | + struct gfs2_holder q_gh, i_gh; | |
1459 | + unsigned int data_blocks, ind_blocks; | |
1460 | + unsigned int blocks = 0; | |
1461 | + int alloc_required; | |
1462 | + struct gfs2_alloc *al; | |
1463 | + loff_t offset; | |
1464 | + int error; | |
1465 | + | |
1466 | + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) | |
1467 | + return -ESRCH; /* Crazy XFS error code */ | |
1468 | + | |
1469 | + switch(type) { | |
1470 | + case USRQUOTA: | |
1471 | + type = QUOTA_USER; | |
1472 | + if (fdq->d_flags != XFS_USER_QUOTA) | |
1473 | + return -EINVAL; | |
1474 | + break; | |
1475 | + case GRPQUOTA: | |
1476 | + type = QUOTA_GROUP; | |
1477 | + if (fdq->d_flags != XFS_GROUP_QUOTA) | |
1478 | + return -EINVAL; | |
1479 | + break; | |
1480 | + default: | |
1481 | + return -EINVAL; | |
1482 | + } | |
1483 | + | |
1484 | + if (fdq->d_fieldmask & ~GFS2_FIELDMASK) | |
1485 | + return -EINVAL; | |
1486 | + if (fdq->d_id != id) | |
1487 | + return -EINVAL; | |
1488 | + | |
1489 | + error = qd_get(sdp, type, id, &qd); | |
1490 | + if (error) | |
1491 | + return error; | |
1492 | + | |
1493 | + mutex_lock(&ip->i_inode.i_mutex); | |
1494 | + error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh); | |
1495 | + if (error) | |
1496 | + goto out_put; | |
1497 | + error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); | |
1498 | + if (error) | |
1499 | + goto out_q; | |
1500 | + | |
1501 | + /* Check for existing entry, if none then alloc new blocks */ | |
1502 | + error = update_qd(sdp, qd); | |
1503 | + if (error) | |
1504 | + goto out_i; | |
1505 | + | |
1506 | + /* If nothing has changed, this is a no-op */ | |
1507 | + if ((fdq->d_fieldmask & FS_DQ_BSOFT) && | |
1508 | + (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn))) | |
1509 | + fdq->d_fieldmask ^= FS_DQ_BSOFT; | |
1510 | + if ((fdq->d_fieldmask & FS_DQ_BHARD) && | |
1511 | + (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit))) | |
1512 | + fdq->d_fieldmask ^= FS_DQ_BHARD; | |
1513 | + if (fdq->d_fieldmask == 0) | |
1514 | + goto out_i; | |
1515 | + | |
1516 | + offset = qd2offset(qd); | |
1517 | + error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota), | |
1518 | + &alloc_required); | |
1519 | + if (error) | |
1520 | + goto out_i; | |
1521 | + if (alloc_required) { | |
1522 | + al = gfs2_alloc_get(ip); | |
1523 | + if (al == NULL) | |
1524 | + goto out_i; | |
1525 | + gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), | |
1526 | + &data_blocks, &ind_blocks); | |
1527 | + blocks = al->al_requested = 1 + data_blocks + ind_blocks; | |
1528 | + error = gfs2_inplace_reserve(ip); | |
1529 | + if (error) | |
1530 | + goto out_alloc; | |
1531 | + } | |
1532 | + | |
1533 | + error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0); | |
1534 | + if (error) | |
1535 | + goto out_release; | |
1536 | + | |
1537 | + /* Apply changes */ | |
1538 | + error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); | |
1539 | + | |
1540 | + gfs2_trans_end(sdp); | |
1541 | +out_release: | |
1542 | + if (alloc_required) { | |
1543 | + gfs2_inplace_release(ip); | |
1544 | +out_alloc: | |
1545 | + gfs2_alloc_put(ip); | |
1546 | + } | |
1547 | +out_i: | |
1548 | + gfs2_glock_dq_uninit(&i_gh); | |
1549 | +out_q: | |
1550 | + gfs2_glock_dq_uninit(&q_gh); | |
1551 | +out_put: | |
1552 | + mutex_unlock(&ip->i_inode.i_mutex); | |
1553 | + qd_put(qd); | |
1554 | + return error; | |
1555 | +} | |
1556 | + | |
1412 | 1557 | const struct quotactl_ops gfs2_quotactl_ops = { |
1413 | 1558 | .quota_sync = gfs2_quota_sync, |
1414 | 1559 | .get_xstate = gfs2_quota_get_xstate, |
1415 | 1560 | .get_xquota = gfs2_xquota_get, |
1561 | + .set_xquota = gfs2_xquota_set, | |
1416 | 1562 | }; |