Blame view
fs/pnode.c
10.1 KB
07b20889e [PATCH] beginning... |
1 2 3 4 5 6 7 8 |
/* * linux/fs/pnode.c * * (C) Copyright IBM Corporation 2005. * Released under GPL v2. * Author : Ram Pai (linuxram@us.ibm.com) * */ |
6b3286ed1 [PATCH] rename st... |
9 |
#include <linux/mnt_namespace.h> |
07b20889e [PATCH] beginning... |
10 11 |
#include <linux/mount.h> #include <linux/fs.h> |
132c94e31 vfs: Carefully pr... |
12 |
#include <linux/nsproxy.h> |
6d59e7f58 [PATCH] move a bu... |
13 |
#include "internal.h" |
07b20889e [PATCH] beginning... |
14 |
#include "pnode.h" |
03e06e68f [PATCH] introduce... |
15 |
/* return the next shared peer mount of @p */ |
c937135d9 vfs: spread struc... |
16 |
static inline struct mount *next_peer(struct mount *p) |
03e06e68f [PATCH] introduce... |
17 |
{ |
6776db3d3 vfs: take mnt_sha... |
18 |
return list_entry(p->mnt_share.next, struct mount, mnt_share); |
03e06e68f [PATCH] introduce... |
19 |
} |
c937135d9 vfs: spread struc... |
20 |
static inline struct mount *first_slave(struct mount *p) |
5afe00221 [PATCH] handling ... |
21 |
{ |
6776db3d3 vfs: take mnt_sha... |
22 |
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); |
5afe00221 [PATCH] handling ... |
23 |
} |
c937135d9 vfs: spread struc... |
24 |
static inline struct mount *next_slave(struct mount *p) |
5afe00221 [PATCH] handling ... |
25 |
{ |
6776db3d3 vfs: take mnt_sha... |
26 |
return list_entry(p->mnt_slave.next, struct mount, mnt_slave); |
5afe00221 [PATCH] handling ... |
27 |
} |
6fc7871fe vfs: spread struc... |
28 29 30 |
/*
 * get_peer_under_root - find a peer of @mnt visible under @root.
 *
 * Walks the whole peer circle of @mnt (including @mnt itself) and
 * returns the first peer that belongs to namespace @ns and whose root
 * dentry is reachable from @root; returns NULL if there is none.
 */
static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
6fc7871fe vfs: spread struc... |
51 |
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	/*
	 * Climb the master chain of @mnt; the first master peer group
	 * with a representative reachable under @root (in @mnt's own
	 * namespace) is the closest dominating one.
	 */
	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	/* no dominating peer group visible under @root */
	return 0;
}
6fc7871fe vfs: spread struc... |
63 |
/*
 * do_make_slave - detach @mnt from its peer group and make it a slave.
 *
 * Picks a new master for @mnt (a surviving peer if possible, otherwise
 * the old master), hands @mnt's own slaves over to that master, and
 * clears @mnt's shared state.  Called from change_mnt_propagation()
 * under the same locking rules (vfsmount lock held for write).
 * Always returns 0.
 */
static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

	if (peer_mnt == mnt) {
		/* no peer with the same root; fall back to any peer */
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;	/* mnt has no peers at all */
	}
	/* sole remaining member of a shared group: release the group id */
	if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
	    list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	/* drop out of the peer circle */
	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	/* a surviving peer (if any) becomes the new master */
	if (peer_mnt)
		master = peer_mnt;

	if (master) {
		/* transfer mnt's own slaves to the new master ... */
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		/* ... and enslave mnt itself, keeping its ex-slaves adjacent */
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		/* nobody to inherit them: detach all of mnt's slaves */
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}
99b7db7b8 fs: brlock vfsmou... |
110 111 112 |
/*
 * change_mnt_propagation - set the propagation type of @mnt
 * @mnt:  mount whose propagation state is to be changed
 * @type: MS_SHARED, MS_SLAVE, MS_UNBINDABLE (anything else behaves
 *        as private: slave link severed, MNT_UNBINDABLE cleared)
 *
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	/* for all other types, first drop out of any peer group */
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		/* private/unbindable: sever the slave link too */
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}
b90fa9ae8 [PATCH] shared mo... |
129 130 131 132 133 |
/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Returns NULL once the walk is back at @origin's peer group.
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in get_source() to be able to find out if
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			/* back in origin's peer group: next peer, or done */
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}
f2ebb3a92 smarter propagate... |
160 |
/*
 * next_group - advance to the first mount of the next slave group
 * receiving propagation from @origin's group, or NULL when the walk
 * has wound back to @origin.  Relies on peer groups occupying
 * contiguous segments of slave lists (see propagation_next()).
 */
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		/* scan forward through m's peers looking for a slave list */
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				/* inside origin's own group: stop at origin */
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;	/* m ends its segment on the slave list */
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			/* more slaves after m on the master's list? */
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			/* master was the last mount of its own segment */
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			/* climb one level and continue from the master */
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}
5afe00221 [PATCH] handling ... |
191 |
|
f2ebb3a92 smarter propagate... |
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 |
/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

/*
 * propagate_one - create one propagated copy under mount @m.
 *
 * Uses the file-scope state set up by propagate_mnt(): copies the tree
 * rooted at last_source, mounts the copy at (@m, mp) and queues it on
 * @list via ->mnt_hash.  Returns 0, or a negative error from
 * copy_tree().
 */
static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (m->mnt_group_id == last_dest->mnt_group_id) {
		/* same peer group as the last destination: plain peer copy */
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		/*
		 * Climb m's master chain until we hit dest_master or a
		 * master marked below for an earlier propagated mount,
		 * resynchronizing last_source/last_dest to the matching
		 * depth -- NOTE(review): correctness depends on the
		 * SET_MNT_MARK protocol further down; verify against
		 * propagate_mnt()'s cleanup loop.
		 */
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p)) {
				while (last_dest->mnt_master != p) {
					last_source = last_source->mnt_master;
					last_dest = last_source->mnt_parent;
				}
				if (n->mnt_group_id != last_dest->mnt_group_id) {
					last_source = last_source->mnt_master;
					last_dest = last_source->mnt_parent;
				}
				break;
			}
		}
		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}
	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	mnt_set_mountpoint(m, mp, child);
	/* remember where we are, for the resync logic above */
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		/* mark the master so later iterations can find this depth */
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return 0;
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * dentry 'dest_dentry'. And propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_dentry: destination dentry.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
84d17192d get rid of full-h... |
263 |
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	/* clear the marks propagate_one() left on the masters */
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	/* 0 on success; on error, tree_list still holds the partial set */
	return ret;
}
a05964f39 [PATCH] shared mo... |
310 311 312 313 |
/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * Returns nonzero (busy) or 0 (ok to unmount).
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child;
	struct mount *parent = mnt->mnt_parent;
	int ret = 0;

	/* mnt is its own parent: it is a namespace root, check only itself */
	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	/* check every mount the unmount would propagate to */
	for (m = propagation_next(parent, parent); m;
	     		m = propagation_next(m, parent)) {
		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
		if (child && list_empty(&child->mnt_mounts) &&
		    (ret = do_refcount_check(child, 1)))
			break;
	}
	return ret;
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
61ef47b1e vfs: spread struc... |
358 |
/*
 * __propagate_umount - extend the unmount of @mnt to every mount its
 * parent propagates to, hash-unlinking each childless propagated copy
 * and chaining it next to @mnt so the caller's list picks it up.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {

		struct mount *child = __lookup_mnt_last(&m->mnt,
						mnt->mnt_mountpoint);
		/*
		 * umount the child only if the child has no
		 * other children
		 */
		if (child && list_empty(&child->mnt_mounts)) {
			hlist_del_init_rcu(&child->mnt_hash);
			hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
		}
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * Always returns 0.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct hlist_head *list)
{
	struct mount *mnt;

	hlist_for_each_entry(mnt, list, mnt_hash)
		__propagate_umount(mnt);
	return 0;
}