Commit 97216be09efd41414725068212e3af0f05cde11a

Authored by Al Viro
1 parent 328e6d9014

fold release_mounts() into namespace_unlock()

... and provide namespace_lock() as a trivial wrapper;
switch to those two consistently.

Result is patterned after rtnl_lock/rtnl_unlock pair.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Showing 1 changed file with 30 additions and 23 deletions — side-by-side diff

... ... @@ -1121,11 +1121,21 @@
1121 1121  
1122 1122 static LIST_HEAD(unmounted); /* protected by namespace_sem */
1123 1123  
1124   -static void release_mounts(struct list_head *head)
  1124 +static void namespace_unlock(void)
1125 1125 {
1126 1126 struct mount *mnt;
1127   - while (!list_empty(head)) {
1128   - mnt = list_first_entry(head, struct mount, mnt_hash);
  1127 + LIST_HEAD(head);
  1128 +
  1129 + if (likely(list_empty(&unmounted))) {
  1130 + up_write(&namespace_sem);
  1131 + return;
  1132 + }
  1133 +
  1134 + list_splice_init(&unmounted, &head);
  1135 + up_write(&namespace_sem);
  1136 +
  1137 + while (!list_empty(&head)) {
  1138 + mnt = list_first_entry(&head, struct mount, mnt_hash);
1129 1139 list_del_init(&mnt->mnt_hash);
1130 1140 if (mnt_has_parent(mnt)) {
1131 1141 struct dentry *dentry;
1132 1142  
... ... @@ -1145,12 +1155,9 @@
1145 1155 }
1146 1156 }
1147 1157  
1148   -static void namespace_unlock(void)
  1158 +static inline void namespace_lock(void)
1149 1159 {
1150   - LIST_HEAD(head);
1151   - list_splice_init(&unmounted, &head);
1152   - up_write(&namespace_sem);
1153   - release_mounts(&head);
  1160 + down_write(&namespace_sem);
1154 1161 }
1155 1162  
1156 1163 /*
... ... @@ -1256,7 +1263,7 @@
1256 1263 return retval;
1257 1264 }
1258 1265  
1259   - down_write(&namespace_sem);
  1266 + namespace_lock();
1260 1267 br_write_lock(&vfsmount_lock);
1261 1268 event++;
1262 1269  
... ... @@ -1412,7 +1419,7 @@
1412 1419 struct vfsmount *collect_mounts(struct path *path)
1413 1420 {
1414 1421 struct mount *tree;
1415   - down_write(&namespace_sem);
  1422 + namespace_lock();
1416 1423 tree = copy_tree(real_mount(path->mnt), path->dentry,
1417 1424 CL_COPY_ALL | CL_PRIVATE);
1418 1425 namespace_unlock();
... ... @@ -1423,7 +1430,7 @@
1423 1430  
1424 1431 void drop_collected_mounts(struct vfsmount *mnt)
1425 1432 {
1426   - down_write(&namespace_sem);
  1433 + namespace_lock();
1427 1434 br_write_lock(&vfsmount_lock);
1428 1435 umount_tree(real_mount(mnt), 0);
1429 1436 br_write_unlock(&vfsmount_lock);
1430 1437  
1431 1438  
... ... @@ -1593,18 +1600,18 @@
1593 1600 mutex_unlock(&dentry->d_inode->i_mutex);
1594 1601 return ERR_PTR(-ENOENT);
1595 1602 }
1596   - down_write(&namespace_sem);
  1603 + namespace_lock();
1597 1604 mnt = lookup_mnt(path);
1598 1605 if (likely(!mnt)) {
1599 1606 struct mountpoint *mp = new_mountpoint(dentry);
1600 1607 if (IS_ERR(mp)) {
1601   - up_write(&namespace_sem);
  1608 + namespace_unlock();
1602 1609 mutex_unlock(&dentry->d_inode->i_mutex);
1603 1610 return mp;
1604 1611 }
1605 1612 return mp;
1606 1613 }
1607   - up_write(&namespace_sem);
  1614 + namespace_unlock();
1608 1615 mutex_unlock(&path->dentry->d_inode->i_mutex);
1609 1616 path_put(path);
1610 1617 path->mnt = mnt;
... ... @@ -1667,7 +1674,7 @@
1667 1674 if (!type)
1668 1675 return -EINVAL;
1669 1676  
1670   - down_write(&namespace_sem);
  1677 + namespace_lock();
1671 1678 if (type == MS_SHARED) {
1672 1679 err = invent_group_ids(mnt, recurse);
1673 1680 if (err)
... ... @@ -1680,7 +1687,7 @@
1680 1687 br_write_unlock(&vfsmount_lock);
1681 1688  
1682 1689 out_unlock:
1683   - up_write(&namespace_sem);
  1690 + namespace_unlock();
1684 1691 return err;
1685 1692 }
1686 1693  
1687 1694  
... ... @@ -2016,11 +2023,11 @@
2016 2023 fail:
2017 2024 /* remove m from any expiration list it may be on */
2018 2025 if (!list_empty(&mnt->mnt_expire)) {
2019   - down_write(&namespace_sem);
  2026 + namespace_lock();
2020 2027 br_write_lock(&vfsmount_lock);
2021 2028 list_del_init(&mnt->mnt_expire);
2022 2029 br_write_unlock(&vfsmount_lock);
2023   - up_write(&namespace_sem);
  2030 + namespace_unlock();
2024 2031 }
2025 2032 mntput(m);
2026 2033 mntput(m);
2027 2034  
... ... @@ -2034,13 +2041,13 @@
2034 2041 */
2035 2042 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2036 2043 {
2037   - down_write(&namespace_sem);
  2044 + namespace_lock();
2038 2045 br_write_lock(&vfsmount_lock);
2039 2046  
2040 2047 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2041 2048  
2042 2049 br_write_unlock(&vfsmount_lock);
2043   - up_write(&namespace_sem);
  2050 + namespace_unlock();
2044 2051 }
2045 2052 EXPORT_SYMBOL(mnt_set_expiry);
2046 2053  
... ... @@ -2057,7 +2064,7 @@
2057 2064 if (list_empty(mounts))
2058 2065 return;
2059 2066  
2060   - down_write(&namespace_sem);
  2067 + namespace_lock();
2061 2068 br_write_lock(&vfsmount_lock);
2062 2069  
2063 2070 /* extract from the expiration list every vfsmount that matches the
... ... @@ -2373,7 +2380,7 @@
2373 2380 if (IS_ERR(new_ns))
2374 2381 return new_ns;
2375 2382  
2376   - down_write(&namespace_sem);
  2383 + namespace_lock();
2377 2384 /* First pass: copy the tree topology */
2378 2385 copy_flags = CL_COPY_ALL | CL_EXPIRE;
2379 2386 if (user_ns != mnt_ns->user_ns)
... ... @@ -2733,7 +2740,7 @@
2733 2740 {
2734 2741 if (!atomic_dec_and_test(&ns->count))
2735 2742 return;
2736   - down_write(&namespace_sem);
  2743 + namespace_lock();
2737 2744 br_write_lock(&vfsmount_lock);
2738 2745 umount_tree(ns->root, 0);
2739 2746 br_write_unlock(&vfsmount_lock);