Commit 38129a13e6e71f666e0468e99fdd932a687b4d7e

Authored by Al Viro
1 parent 0b1b901b5a

switch mnt_hash to hlist

Fixes an RCU bug: walking through an hlist is safe in the face of element
moves, since the chain is self-terminating (it ends in NULL).  Cyclic lists
are not: if we end up jumping to another hash chain, we'll loop forever
without ever reaching the original list head.

[fix for dumb braino folded]

Spotted by: Max Kellermann <mk@cm4all.com>
Cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
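
Why the cyclic walk can fail to terminate, in miniature: the following is an
illustrative userspace model, not kernel code (the helpers mirror the
semantics of include/linux/list.h but are reimplemented here).  A lockless
reader is parked on an element while that element is moved to another chain;
the cyclic walker's stop condition, "back at the original head", can never
fire once it is circling the wrong chain, while the hlist walker stops at
NULL no matter where it ended up.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	n->pprev = &h->first;
	h->first = n;
}

static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	/* cyclic list: reader parked on e while e moves chain a -> b */
	struct list_head a, b, e;
	list_init(&a);
	list_init(&b);
	list_add(&e, &a);
	list_del(&e);
	list_add(&e, &b);

	int steps = 0;
	struct list_head *p;
	for (p = e.next; p != &a && steps < 10; p = p->next)
		steps++;	/* circles b <-> e; &a is never reached */
	printf("cyclic list: still walking after %d steps\n", steps);

	/* hlist: same move, but the new chain ends in NULL */
	struct hlist_head ha = { 0 }, hb = { 0 };
	struct hlist_node he;
	hlist_add_head(&he, &ha);
	hlist_del(&he);
	hlist_add_head(&he, &hb);

	steps = 0;
	struct hlist_node *q;
	for (q = he.next; q; q = q->next)
		steps++;
	printf("hlist: terminated after %d steps\n", steps);
	return 0;
}

Note that the hlist reader may still finish on the wrong chain and miss
entries; termination is what the conversion buys, and a stale result is
caught by the callers' mount_lock sequence-count retry.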

4 changed files with 61 additions and 50 deletions

fs/mount.h
@@ -25,7 +25,7 @@
 };
 
 struct mount {
-	struct list_head mnt_hash;
+	struct hlist_node mnt_hash;
 	struct mount *mnt_parent;
 	struct dentry *mnt_mountpoint;
 	struct vfsmount mnt;
fs/namespace.c
@@ -59,7 +59,7 @@
 static int mnt_id_start = 0;
 static int mnt_group_start = 1;
 
-static struct list_head *mount_hashtable __read_mostly;
+static struct hlist_head *mount_hashtable __read_mostly;
 static struct hlist_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
 static DECLARE_RWSEM(namespace_sem);
@@ -78,7 +78,7 @@
  */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
 
-static inline struct list_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
+static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
 {
 	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
 	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
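
The hunk above only changes m_hash()'s return type.  For context, a
standalone sketch of the whole bucket computation: the final fold-and-mask
lines are reconstructed from the kernel's usual pattern (they are cut off in
the hunk), and the shift/mask values here are assumed stand-ins for what
alloc_large_system_hash() computes.

#include <stdio.h>

#define L1_CACHE_BYTES 64			/* assumed */
static unsigned int m_hash_shift = 12;		/* assumed */
static unsigned long m_hash_mask = (1UL << 12) - 1;	/* assumed */

static unsigned long m_hash(void *mnt, void *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return tmp & m_hash_mask;	/* bucket index into mount_hashtable */
}

int main(void)
{
	int m, d;	/* stand-ins for a vfsmount and a dentry */
	printf("bucket %lu of %lu\n", m_hash(&m, &d), m_hash_mask + 1);
	return 0;
}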
@@ -217,7 +217,7 @@
 	mnt->mnt_writers = 0;
 #endif
 
-	INIT_LIST_HEAD(&mnt->mnt_hash);
+	INIT_HLIST_NODE(&mnt->mnt_hash);
 	INIT_LIST_HEAD(&mnt->mnt_child);
 	INIT_LIST_HEAD(&mnt->mnt_mounts);
 	INIT_LIST_HEAD(&mnt->mnt_list);
@@ -605,10 +605,10 @@
  */
 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 {
-	struct list_head *head = m_hash(mnt, dentry);
+	struct hlist_head *head = m_hash(mnt, dentry);
 	struct mount *p;
 
-	list_for_each_entry_rcu(p, head, mnt_hash)
+	hlist_for_each_entry_rcu(p, head, mnt_hash)
 		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
 			return p;
 	return NULL;
@@ -620,20 +620,16 @@
  */
 struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
 {
-	struct list_head *head = m_hash(mnt, dentry);
-	struct mount *p, *res = NULL;
-
-	list_for_each_entry(p, head, mnt_hash)
-		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
-			goto found;
-	return res;
-found:
-	res = p;
-	list_for_each_entry_continue(p, head, mnt_hash) {
+	struct mount *p, *res;
+	res = p = __lookup_mnt(mnt, dentry);
+	if (!p)
+		goto out;
+	hlist_for_each_entry_continue(p, mnt_hash) {
 		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
 			break;
 		res = p;
 	}
+out:
 	return res;
 }
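
__lookup_mnt_last() now reuses __lookup_mnt() for the first match and walks
on with hlist_for_each_entry_continue(), which resumes after the current
entry.  As in the old code, this relies on matching entries sitting next to
each other on the chain (shadow mounts are inserted adjacently in
commit_tree() below).  A plain-array sketch of the shape, with ints standing
in for mounts:

#include <stdio.h>

int main(void)
{
	/* a hash chain; 7 marks entries matching (mnt, dentry) */
	int chain[] = { 7, 7, 7, 3, 7 };
	int n = sizeof(chain) / sizeof(chain[0]);
	int i, first = -1, last;

	for (i = 0; i < n; i++)		/* the __lookup_mnt() step */
		if (chain[i] == 7) { first = i; break; }
	if (first < 0)
		return 0;

	last = first;			/* the _continue walk, with break */
	for (i = first + 1; i < n && chain[i] == 7; i++)
		last = i;

	printf("first=%d last=%d (the trailing 7 is not contiguous)\n",
	       first, last);
	return 0;
}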
@@ -750,7 +746,7 @@
 	mnt->mnt_parent = mnt;
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	list_del_init(&mnt->mnt_child);
-	list_del_init(&mnt->mnt_hash);
+	hlist_del_init_rcu(&mnt->mnt_hash);
 	put_mountpoint(mnt->mnt_mp);
 	mnt->mnt_mp = NULL;
 }
@@ -777,7 +773,7 @@
 			struct mountpoint *mp)
 {
 	mnt_set_mountpoint(parent, mp, mnt);
-	list_add(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
+	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 }
@@ -800,9 +796,9 @@
 	list_splice(&head, n->list.prev);
 
 	if (shadows)
-		list_add(&mnt->mnt_hash, &shadows->mnt_hash);
+		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
 	else
-		list_add(&mnt->mnt_hash,
+		hlist_add_head_rcu(&mnt->mnt_hash,
 				m_hash(&parent->mnt, mnt->mnt_mountpoint));
 	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 	touch_mnt_namespace(n);
@@ -1193,26 +1189,28 @@
 
 EXPORT_SYMBOL(may_umount);
 
-static LIST_HEAD(unmounted);	/* protected by namespace_sem */
+static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
 
 static void namespace_unlock(void)
 {
 	struct mount *mnt;
-	LIST_HEAD(head);
+	struct hlist_head head = unmounted;
 
-	if (likely(list_empty(&unmounted))) {
+	if (likely(hlist_empty(&head))) {
 		up_write(&namespace_sem);
 		return;
 	}
 
-	list_splice_init(&unmounted, &head);
+	head.first->pprev = &head.first;
+	INIT_HLIST_HEAD(&unmounted);
+
 	up_write(&namespace_sem);
 
 	synchronize_rcu();
 
-	while (!list_empty(&head)) {
-		mnt = list_first_entry(&head, struct mount, mnt_hash);
-		list_del_init(&mnt->mnt_hash);
+	while (!hlist_empty(&head)) {
+		mnt = hlist_entry(head.first, struct mount, mnt_hash);
+		hlist_del_init(&mnt->mnt_hash);
 		if (mnt->mnt_ex_mountpoint.mnt)
 			path_put(&mnt->mnt_ex_mountpoint);
 		mntput(&mnt->mnt);
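
hlist has no equivalent of list_splice_init(), so namespace_unlock() now
detaches the whole chain by hand: copy the head, re-aim the first node's
pprev at the new head, and reinitialize the old head.  The same
steal-the-chain idiom reappears in the propagate_mnt() error path further
down.  A userspace model (struct layout mirrors the kernel's; the rest is
illustration only) of why the pprev fixup is needed:

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

int main(void)
{
	struct hlist_node n2 = { NULL, NULL }, n1 = { &n2, NULL };
	struct hlist_head unmounted = { &n1 };
	n1.pprev = &unmounted.first;
	n2.pprev = &n1.next;

	/* the three steps from the patch */
	struct hlist_head head = unmounted;	/* copy the head */
	head.first->pprev = &head.first;	/* re-aim first node's pprev */
	unmounted.first = NULL;			/* INIT_HLIST_HEAD(&unmounted) */

	/* without the fixup, an hlist_del() of n1 would write through
	 * its old pprev, i.e. into the now-empty unmounted head */
	assert(head.first == &n1 && *head.first->pprev == &n1);
	assert(unmounted.first == NULL);
	return 0;
}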
@@ -1233,16 +1231,19 @@
  */
 void umount_tree(struct mount *mnt, int how)
 {
-	LIST_HEAD(tmp_list);
+	HLIST_HEAD(tmp_list);
 	struct mount *p;
+	struct mount *last = NULL;
 
-	for (p = mnt; p; p = next_mnt(p, mnt))
-		list_move(&p->mnt_hash, &tmp_list);
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		hlist_del_init_rcu(&p->mnt_hash);
+		hlist_add_head(&p->mnt_hash, &tmp_list);
+	}
 
 	if (how)
 		propagate_umount(&tmp_list);
 
-	list_for_each_entry(p, &tmp_list, mnt_hash) {
+	hlist_for_each_entry(p, &tmp_list, mnt_hash) {
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
@@ -1260,8 +1261,13 @@
 			p->mnt_mp = NULL;
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
+		last = p;
 	}
-	list_splice(&tmp_list, &unmounted);
+	if (last) {
+		last->mnt_hash.next = unmounted.first;
+		unmounted.first = tmp_list.first;
+		unmounted.first->pprev = &unmounted.first;
+	}
 }
 
 static void shrink_submounts(struct mount *mnt);
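
umount_tree() likewise stitches two chains together by hand, prepending
tmp_list to unmounted in three assignments.  The old first element's pprev is
left stale; that appears to be safe here because namespace_unlock() only
ever pops from the front and hlist_del() repairs the successor's pprev at
each step.  The model below illustrates exactly what the patch does; it is
not a general-purpose hlist splice.

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

int main(void)
{
	struct hlist_node t1, t2, u1;	/* t*: tmp_list, u1: on unmounted */
	struct hlist_head tmp_list = { &t1 }, unmounted = { &u1 };
	t1 = (struct hlist_node){ &t2, &tmp_list.first };
	t2 = (struct hlist_node){ NULL, &t1.next };
	u1 = (struct hlist_node){ NULL, &unmounted.first };

	/* the three assignments from the patch ("last" is t2 here) */
	t2.next = unmounted.first;
	unmounted.first = tmp_list.first;
	unmounted.first->pprev = &unmounted.first;

	/* chain is now t1 -> t2 -> u1 -> NULL */
	assert(unmounted.first == &t1 && t1.next == &t2 && t2.next == &u1);

	/* u1's pprev was not updated and no longer points back at u1 */
	assert(*u1.pprev != &u1);
	return 0;
}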
@@ -1645,8 +1651,9 @@
 			struct mountpoint *dest_mp,
 			struct path *parent_path)
 {
-	LIST_HEAD(tree_list);
+	HLIST_HEAD(tree_list);
 	struct mount *child, *p;
+	struct hlist_node *n;
 	int err;
 
 	if (IS_MNT_SHARED(dest_mnt)) {
@@ -1671,9 +1678,9 @@
 		commit_tree(source_mnt, NULL);
 	}
 
-	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
+	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
 		struct mount *q;
-		list_del_init(&child->mnt_hash);
+		hlist_del_init(&child->mnt_hash);
 		q = __lookup_mnt_last(&child->mnt_parent->mnt,
 				      child->mnt_mountpoint);
 		commit_tree(child, q);
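
Unlike list_for_each_entry_safe(), the hlist variant's scratch cursor is a
bare struct hlist_node * (n above) rather than a second struct mount *: the
saved lookahead may be the NULL terminator, which has no containing entry.
A minimal expansion of the pattern (illustrative; not the kernel macro
itself):

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void hlist_del_init(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;		/* matches INIT_HLIST_NODE */
	n->pprev = NULL;
}

int main(void)
{
	struct hlist_node b = { NULL, NULL }, a = { &b, NULL };
	struct hlist_head tree_list = { &a };
	a.pprev = &tree_list.first;
	b.pprev = &a.next;

	struct hlist_node *pos, *n;
	int visited = 0;
	for (pos = tree_list.first; pos && (n = pos->next, 1); pos = n) {
		hlist_del_init(pos);	/* safe: n was saved first */
		visited++;
	}
	assert(visited == 2 && tree_list.first == NULL);
	return 0;
}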
@@ -2818,7 +2825,7 @@
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
 	mount_hashtable = alloc_large_system_hash("Mount-cache",
-				sizeof(struct list_head),
+				sizeof(struct hlist_head),
 				mhash_entries, 19,
 				0,
 				&m_hash_shift, &m_hash_mask, 0, 0);
@@ -2832,7 +2839,7 @@
 		panic("Failed to allocate mount hash table\n");
 
 	for (u = 0; u <= m_hash_mask; u++)
-		INIT_LIST_HEAD(&mount_hashtable[u]);
+		INIT_HLIST_HEAD(&mount_hashtable[u]);
 	for (u = 0; u <= mp_hash_mask; u++)
 		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
 
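
A detail worth noting in the table setup: struct hlist_head is a single
pointer, half the size of struct list_head, and an all-zero table is already
a table of empty chains.  A userspace sketch, with calloc standing in for
alloc_large_system_hash() and an assumed shift value:

#include <stdio.h>
#include <stdlib.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

int main(void)
{
	unsigned int m_hash_shift = 12;		/* assumed */
	unsigned long m_hash_mask = (1UL << m_hash_shift) - 1;

	/* calloc zeroes the array, which for hlist_head already means
	 * "empty"; the kernel still runs INIT_HLIST_HEAD explicitly */
	struct hlist_head *mount_hashtable =
		calloc(m_hash_mask + 1, sizeof(struct hlist_head));
	if (!mount_hashtable)
		return 1;

	printf("buckets: %lu, head size: %zu bytes (list_head: %zu)\n",
	       m_hash_mask + 1, sizeof(struct hlist_head),
	       2 * sizeof(void *));
	free(mount_hashtable);
	return 0;
}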
fs/pnode.c
@@ -220,14 +220,14 @@
  * @tree_list : list of heads of trees to be attached.
  */
 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
-		  struct mount *source_mnt, struct list_head *tree_list)
+		  struct mount *source_mnt, struct hlist_head *tree_list)
 {
 	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
 	struct mount *m, *child;
 	int ret = 0;
 	struct mount *prev_dest_mnt = dest_mnt;
 	struct mount *prev_src_mnt = source_mnt;
-	LIST_HEAD(tmp_list);
+	HLIST_HEAD(tmp_list);
 
 	for (m = propagation_next(dest_mnt, dest_mnt); m;
 			m = propagation_next(m, dest_mnt)) {
@@ -246,27 +246,29 @@
 		child = copy_tree(source, source->mnt.mnt_root, type);
 		if (IS_ERR(child)) {
 			ret = PTR_ERR(child);
-			list_splice(tree_list, tmp_list.prev);
+			tmp_list = *tree_list;
+			tmp_list.first->pprev = &tmp_list.first;
+			INIT_HLIST_HEAD(tree_list);
 			goto out;
 		}
 
 		if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
 			mnt_set_mountpoint(m, dest_mp, child);
-			list_add_tail(&child->mnt_hash, tree_list);
+			hlist_add_head(&child->mnt_hash, tree_list);
 		} else {
 			/*
 			 * This can happen if the parent mount was bind mounted
 			 * on some subdirectory of a shared/slave mount.
 			 */
-			list_add_tail(&child->mnt_hash, &tmp_list);
+			hlist_add_head(&child->mnt_hash, &tmp_list);
 		}
 		prev_dest_mnt = m;
 		prev_src_mnt = child;
 	}
 out:
 	lock_mount_hash();
-	while (!list_empty(&tmp_list)) {
-		child = list_first_entry(&tmp_list, struct mount, mnt_hash);
+	while (!hlist_empty(&tmp_list)) {
+		child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
 		umount_tree(child, 0);
 	}
 	unlock_mount_hash();
@@ -338,8 +340,10 @@
 		 * umount the child only if the child has no
 		 * other children
 		 */
-		if (child && list_empty(&child->mnt_mounts))
-			list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
+		if (child && list_empty(&child->mnt_mounts)) {
+			hlist_del_init_rcu(&child->mnt_hash);
+			hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
+		}
 	}
 }
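
The old list_move_tail() placed the child just ahead of mnt on mnt's chain;
the hlist replacement spells that as an unhash plus hlist_add_before_rcu().
A userspace model of the non-RCU pair (struct layout and argument order
mirror the kernel's; everything else is illustration):

#include <assert.h>
#include <stddef.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void hlist_del_init(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;
}

/* kernel argument order: the node being added comes first */
static void hlist_add_before(struct hlist_node *n, struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*n->pprev = n;
}

int main(void)
{
	struct hlist_node mnt, child;
	struct hlist_head chain = { &mnt }, other = { &child };
	mnt = (struct hlist_node){ NULL, &chain.first };
	child = (struct hlist_node){ NULL, &other.first };

	hlist_del_init(&child);		/* off its own chain... */
	hlist_add_before(&child, &mnt);	/* ...and in front of mnt */

	assert(chain.first == &child && child.next == &mnt);
	assert(mnt.pprev == &child.next && other.first == NULL);
	return 0;
}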
@@ -350,11 +354,11 @@
  *
  * vfsmount lock must be held for write
  */
-int propagate_umount(struct list_head *list)
+int propagate_umount(struct hlist_head *list)
 {
 	struct mount *mnt;
 
-	list_for_each_entry(mnt, list, mnt_hash)
+	hlist_for_each_entry(mnt, list, mnt_hash)
 		__propagate_umount(mnt);
 	return 0;
 }
fs/pnode.h
@@ -36,8 +36,8 @@
 
 void change_mnt_propagation(struct mount *, int);
 int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
-		struct list_head *);
-int propagate_umount(struct list_head *);
+		struct hlist_head *);
+int propagate_umount(struct hlist_head *);
 int propagate_mount_busy(struct mount *, int);
 void mnt_release_group_id(struct mount *);
 int get_dominating_id(struct mount *mnt, const struct path *root);