Commit 4ce5d2b1a8fde84c0eebe70652cf28b9beda6b4e
1 parent
21e851943e
Exists in
smarc-imx_3.14.28_1.0.0_ga
and in
1 other branch
vfs: Don't copy mount bind mounts of /proc/<pid>/ns/mnt between namespaces
Don't copy bind mounts of /proc/<pid>/ns/mnt between namespaces. These files hold references to a mount namespace and copying them between namespaces could result in a reference counting loop. The current mnt_ns_loop test prevents loops on the assumption that mounts don't cross between namespaces. Unfortunately, unsharing a mount namespace and shared subtrees can both cause mounts to propagate between mount namespaces. Two flags, CL_COPY_UNBINDABLE and CL_COPY_MNT_NS_FILE, are added to control this behavior, and CL_COPY_ALL is redefined as the combination of both. Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Showing 2 changed files with 38 additions and 13 deletions Side-by-side Diff
fs/namespace.c
... | ... | @@ -1355,14 +1355,11 @@ |
1355 | 1355 | |
1356 | 1356 | #endif |
1357 | 1357 | |
1358 | -static bool mnt_ns_loop(struct path *path) | |
1358 | +static bool is_mnt_ns_file(struct dentry *dentry) | |
1359 | 1359 | { |
1360 | - /* Could bind mounting the mount namespace inode cause a | |
1361 | - * mount namespace loop? | |
1362 | - */ | |
1363 | - struct inode *inode = path->dentry->d_inode; | |
1360 | + /* Is this a proxy for a mount namespace? */ | |
1361 | + struct inode *inode = dentry->d_inode; | |
1364 | 1362 | struct proc_ns *ei; |
1365 | - struct mnt_namespace *mnt_ns; | |
1366 | 1363 | |
1367 | 1364 | if (!proc_ns_inode(inode)) |
1368 | 1365 | return false; |
... | ... | @@ -1371,7 +1368,19 @@ |
1371 | 1368 | if (ei->ns_ops != &mntns_operations) |
1372 | 1369 | return false; |
1373 | 1370 | |
1374 | - mnt_ns = ei->ns; | |
1371 | + return true; | |
1372 | +} | |
1373 | + | |
1374 | +static bool mnt_ns_loop(struct dentry *dentry) | |
1375 | +{ | |
1376 | + /* Could bind mounting the mount namespace inode cause a | |
1377 | + * mount namespace loop? | |
1378 | + */ | |
1379 | + struct mnt_namespace *mnt_ns; | |
1380 | + if (!is_mnt_ns_file(dentry)) | |
1381 | + return false; | |
1382 | + | |
1383 | + mnt_ns = get_proc_ns(dentry->d_inode)->ns; | |
1375 | 1384 | return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; |
1376 | 1385 | } |
1377 | 1386 | |
1378 | 1387 | |
... | ... | @@ -1380,9 +1389,12 @@ |
1380 | 1389 | { |
1381 | 1390 | struct mount *res, *p, *q, *r, *parent; |
1382 | 1391 | |
1383 | - if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt)) | |
1392 | + if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) | |
1384 | 1393 | return ERR_PTR(-EINVAL); |
1385 | 1394 | |
1395 | + if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) | |
1396 | + return ERR_PTR(-EINVAL); | |
1397 | + | |
1386 | 1398 | res = q = clone_mnt(mnt, dentry, flag); |
1387 | 1399 | if (IS_ERR(q)) |
1388 | 1400 | return q; |
1389 | 1401 | |
... | ... | @@ -1397,10 +1409,16 @@ |
1397 | 1409 | continue; |
1398 | 1410 | |
1399 | 1411 | for (s = r; s; s = next_mnt(s, r)) { |
1400 | - if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) { | |
1412 | + if (!(flag & CL_COPY_UNBINDABLE) && | |
1413 | + IS_MNT_UNBINDABLE(s)) { | |
1401 | 1414 | s = skip_mnt_tree(s); |
1402 | 1415 | continue; |
1403 | 1416 | } |
1417 | + if (!(flag & CL_COPY_MNT_NS_FILE) && | |
1418 | + is_mnt_ns_file(s->mnt.mnt_root)) { | |
1419 | + s = skip_mnt_tree(s); | |
1420 | + continue; | |
1421 | + } | |
1404 | 1422 | while (p != s->mnt_parent) { |
1405 | 1423 | p = p->mnt_parent; |
1406 | 1424 | q = q->mnt_parent; |
... | ... | @@ -1733,7 +1751,7 @@ |
1733 | 1751 | return err; |
1734 | 1752 | |
1735 | 1753 | err = -EINVAL; |
1736 | - if (mnt_ns_loop(&old_path)) | |
1754 | + if (mnt_ns_loop(old_path.dentry)) | |
1737 | 1755 | goto out; |
1738 | 1756 | |
1739 | 1757 | mp = lock_mount(path); |
... | ... | @@ -1755,7 +1773,7 @@ |
1755 | 1773 | goto out2; |
1756 | 1774 | |
1757 | 1775 | if (recurse) |
1758 | - mnt = copy_tree(old, old_path.dentry, 0); | |
1776 | + mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE); | |
1759 | 1777 | else |
1760 | 1778 | mnt = clone_mnt(old, old_path.dentry, 0); |
1761 | 1779 | |
... | ... | @@ -2417,7 +2435,7 @@ |
2417 | 2435 | |
2418 | 2436 | namespace_lock(); |
2419 | 2437 | /* First pass: copy the tree topology */ |
2420 | - copy_flags = CL_COPY_ALL | CL_EXPIRE; | |
2438 | + copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; | |
2421 | 2439 | if (user_ns != mnt_ns->user_ns) |
2422 | 2440 | copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; |
2423 | 2441 | new = copy_tree(old, old->mnt.mnt_root, copy_flags); |
... | ... | @@ -2452,6 +2470,10 @@ |
2452 | 2470 | } |
2453 | 2471 | p = next_mnt(p, old); |
2454 | 2472 | q = next_mnt(q, new); |
2473 | + if (!q) | |
2474 | + break; | |
2475 | + while (p->mnt.mnt_root != q->mnt.mnt_root) | |
2476 | + p = next_mnt(p, old); | |
2455 | 2477 | } |
2456 | 2478 | namespace_unlock(); |
2457 | 2479 |
fs/pnode.h
... | ... | @@ -19,11 +19,14 @@ |
19 | 19 | |
20 | 20 | #define CL_EXPIRE 0x01 |
21 | 21 | #define CL_SLAVE 0x02 |
22 | -#define CL_COPY_ALL 0x04 | |
22 | +#define CL_COPY_UNBINDABLE 0x04 | |
23 | 23 | #define CL_MAKE_SHARED 0x08 |
24 | 24 | #define CL_PRIVATE 0x10 |
25 | 25 | #define CL_SHARED_TO_SLAVE 0x20 |
26 | 26 | #define CL_UNPRIVILEGED 0x40 |
27 | +#define CL_COPY_MNT_NS_FILE 0x80 | |
28 | + | |
29 | +#define CL_COPY_ALL (CL_COPY_UNBINDABLE | CL_COPY_MNT_NS_FILE) | |
27 | 30 | |
28 | 31 | static inline void set_mnt_shared(struct mount *mnt) |
29 | 32 | { |