Blame view
fs/ceph/dir.c
36.8 KB
3d14c5d2b ceph: factor out ... |
1 |
#include <linux/ceph/ceph_debug.h> |
2817b000b ceph: directory o... |
2 3 4 5 |
#include <linux/spinlock.h> #include <linux/fs_struct.h> #include <linux/namei.h> |
5a0e3ad6a include cleanup: ... |
6 |
#include <linux/slab.h> |
2817b000b ceph: directory o... |
7 8 9 |
#include <linux/sched.h> #include "super.h" |
3d14c5d2b ceph: factor out ... |
10 |
#include "mds_client.h" |
2817b000b ceph: directory o... |
11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
/* * Directory operations: readdir, lookup, create, link, unlink, * rename, etc. */ /* * Ceph MDS operations are specified in terms of a base ino and * relative path. Thus, the client can specify an operation on a * specific inode (e.g., a getattr due to fstat(2)), or as a path * relative to, say, the root directory. * * Normally, we limit ourselves to strict inode ops (no path component) * or dentry operations (a single path component relative to an ino). The * exception to this is open_root_dentry(), which will open the mount * point by name. */ const struct inode_operations ceph_dir_iops; const struct file_operations ceph_dir_fops; |
52dfb8ac0 ceph: constify de... |
31 |
const struct dentry_operations ceph_dentry_ops; |
2817b000b ceph: directory o... |
32 33 34 35 36 37 38 39 40 41 |
/*
 * Initialize ceph dentry state.
 *
 * Attach a zeroed ceph_dentry_info to dentry->d_fsdata (idempotent:
 * returns 0 immediately if one is already attached) and install the
 * appropriate dentry_operations based on the parent's snap context
 * (normal dir, .snap dir, or inside a snapshot).
 *
 * Returns 0 on success, -ENOMEM if the info struct cannot be
 * allocated.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	/* fast path: already initialized (re-checked under d_lock below) */
	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race: someone else attached d_fsdata first */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
5f21c96dd ceph: protect acc... |
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 |
/*
 * Return the parent's inode with an extra reference (ihold) taken,
 * or NULL if the dentry is NULL or has no parent.  The d_lock is
 * held while d_parent is sampled so the parent cannot change under
 * us.  The caller owns the returned inode reference.
 */
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *parent_inode = NULL;

	if (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_parent) {
			parent_inode = dentry->d_parent->d_inode;
			ihold(parent_inode);
		}
		spin_unlock(&dentry->d_lock);
	}
	return parent_inode;
}
2817b000b ceph: directory o... |
86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	/* high 32 bits of f_pos hold the frag id */
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	/* low 32 bits of f_pos hold the offset within the frag */
	return p & 0xffffffff;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * D_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p) ", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		/* d_subdirs is kept in reverse readdir order; start at tail */
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p ", p->prev, p->next);
	} else {
		/* resume just past the dentry we returned last time */
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p ", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			/* walked off the list: no more entries */
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* skip unhashed/negative dentries, the hidden snapdir and
		 * /.ceph, and anything before our current position */
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s ", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	/* pin the dentry (we hold its d_lock), then drop locks before
	 * calling back into filldir */
	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p ", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_test_complete(dir)) {
		dout(" lost D_COMPLETE on %p; falling back to mds ", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	/* replace any previously remembered name */
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s' ", fi->last_name);
	return 0;
}

/*
 * Main readdir entry point.  Emits "." and ".." synthetically, then
 * either satisfies the readdir from the dcache (__dcache_readdir,
 * when the dir is known complete and we hold FILE_SHARED caps) or
 * fetches directory frags from the MDS one chunk at a time.
 */
static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u ", inode, filp, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.' ");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = parent_ino(filp->f_dentry);
		dout("readdir off 1 -> '..' ");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    ceph_dir_test_complete(inode) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
		/* -EAGAIN: dcache walk raced a change; fall through to MDS */
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		/* remember where the dcache walk stopped so the MDS
		 * readdir can resume at the same name */
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s' ",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		/* reserve one extra cap for the dir itself */
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d ", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude D_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;	/* skip . and .. */
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d ", frag,
	     rinfo->dir_nr, off, fi->offset);
	/* emit entries from the buffered chunk, starting at off */
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p ",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us... ");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	/* last_name set => this frag has more chunks; fetch the next one */
	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x ", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		ceph_dir_set_complete(inode);
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p filp %p done. ", inode, filp);
	return 0;
}

/*
 * Drop all buffered readdir state (cached MDS reply, remembered last
 * name, pinned dentry) and rewind to just past . and ..
 */
static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

/*
 * llseek for directories: moves f_pos and invalidates buffered
 * readdir state when the seek crosses a frag boundary or rewinds.
 */
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	/* NOTE(review): old_offset captures the raw argument, not the
	 * previous f_pos; for SEEK_SET it always equals the final
	 * offset, so the frag/forward-seek comparisons below look
	 * degenerate in that case -- confirm intent */
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		/* fallthrough */
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content ", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 *
 * If the MDS returned -ENOENT for a name matching the configured
 * snapdir name in an unsnapped directory, synthesize the snapdir
 * inode locally and report success.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p ",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}
2817b000b ceph: directory o... |
540 |
|
468640e32 ceph: fix ceph_lo... |
541 542 543 544 545 546 547 548 549 550 551 552 553 554 |
/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p ",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				/* stale positive dentry: unhash it */
				d_drop(dentry);
				err = -ENOENT;
			} else {
				/* install a negative dentry locally */
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;	/* NULL tells the VFS the dentry is unchanged */
	return dentry;
}
1d1de9160 ceph: hide /.ceph... |
578 579 580 581 582 |
/*
 * Is this the hidden ".ceph" entry directly under the filesystem
 * root?  (Matched by name prefix in the root directory only.)
 */
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	if (ceph_ino(inode) != CEPH_INO_ROOT)
		return 0;
	return strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
2817b000b ceph: directory o... |
583 584 585 586 587 588 589 |
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s' ",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d ", dir, ci->i_ceph_flags);
		/* only trust a local negative result if the dir is known
		 * complete, we hold FILE_SHARED caps, and the name is not
		 * one of the special entries (.snap, /.ceph) */
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_dir_test_complete(dir) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT ", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p ", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

/*
 * mknod: issue a MKNOD to the auth MDS; on success with no trace,
 * fall back to ceph_handle_notrace_create() to link the dentry.
 */
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	/* cannot modify a snapshotted tree */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d ",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}
4acdaf27e switch ->create()... |
713 |
/*
 * create: with an open intent, delegate to ceph_lookup_open();
 * otherwise fall back to mknod with S_IFREG.
 */
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s' ",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

/*
 * symlink: send a SYMLINK op to the auth MDS with the link target
 * in r_path2.
 */
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			    const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	/* cannot modify a snapshotted tree */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s' ", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}
18bb1db3e switch vfs_mkdir(... |
768 |
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) |
2817b000b ceph: directory o... |
769 |
{ |
3d14c5d2b ceph: factor out ... |
770 771 |
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; |
2817b000b ceph: directory o... |
772 773 774 775 776 777 778 779 780 781 782 |
struct ceph_mds_request *req; int err = -EROFS; int op; if (ceph_snap(dir) == CEPH_SNAPDIR) { /* mkdir .snap/foo is a MKSNAP */ op = CEPH_MDS_OP_MKSNAP; dout("mksnap dir %p snap '%.*s' dn %p ", dir, dentry->d_name.len, dentry->d_name.name, dentry); } else if (ceph_snap(dir) == CEPH_NOSNAP) { |
18bb1db3e switch vfs_mkdir(... |
783 784 |
dout("mkdir dir %p dn %p mode 0%ho ", dir, dentry, mode); |
2817b000b ceph: directory o... |
785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 |
op = CEPH_MDS_OP_MKDIR; } else { goto out; } req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; } req->r_dentry = dget(dentry); req->r_num_caps = 2; req->r_locked_dir = dir; req->r_args.mkdir.mode = cpu_to_le32(mode); req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; err = ceph_mdsc_do_request(mdsc, dir, req); if (!err && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); ceph_mdsc_put_request(req); out: if (err < 0) d_drop(dentry); return err; } static int ceph_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { |
3d14c5d2b ceph: factor out ... |
814 815 |
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; |
2817b000b ceph: directory o... |
816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 |
struct ceph_mds_request *req; int err; if (ceph_snap(dir) != CEPH_NOSNAP) return -EROFS; dout("link in dir %p old_dentry %p dentry %p ", dir, old_dentry, dentry); req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS); if (IS_ERR(req)) { d_drop(dentry); return PTR_ERR(req); } req->r_dentry = dget(dentry); req->r_num_caps = 2; req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */ |
41b02e1f9 ceph: explicitly ... |
833 |
req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry); |
2817b000b ceph: directory o... |
834 835 836 837 |
req->r_locked_dir = dir; req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; err = ceph_mdsc_do_request(mdsc, dir, req); |
70b666c3b ceph: use ihold w... |
838 |
if (err) { |
2817b000b ceph: directory o... |
839 |
d_drop(dentry); |
70b666c3b ceph: use ihold w... |
840 841 842 843 |
} else if (!req->r_reply_info.head->is_dentry) { ihold(old_dentry->d_inode); d_instantiate(dentry, old_dentry->d_inode); } |
2817b000b ceph: directory o... |
844 845 846 847 848 849 850 851 852 853 854 855 856 857 |
ceph_mdsc_put_request(req); return err; } /* * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it * looks like the link count will hit 0, drop any other caps (other * than PIN) we don't specifically want (due to the file still being * open). */ static int drop_caps_for_unlink(struct inode *inode) { struct ceph_inode_info *ci = ceph_inode(inode); int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; |
be655596b ceph: use i_ceph_... |
858 |
spin_lock(&ci->i_ceph_lock); |
2817b000b ceph: directory o... |
859 860 861 862 |
if (inode->i_nlink == 1) { drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); ci->i_ceph_flags |= CEPH_I_NODELAY; } |
be655596b ceph: use i_ceph_... |
863 |
spin_unlock(&ci->i_ceph_lock); |
2817b000b ceph: directory o... |
864 865 866 867 868 869 870 871 |
return drop; } /* * rmdir and unlink are differ only by the metadata op code */ static int ceph_unlink(struct inode *dir, struct dentry *dentry) { |
3d14c5d2b ceph: factor out ... |
872 873 |
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; |
2817b000b ceph: directory o... |
874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 |
struct inode *inode = dentry->d_inode; struct ceph_mds_request *req; int err = -EROFS; int op; if (ceph_snap(dir) == CEPH_SNAPDIR) { /* rmdir .snap/foo is RMSNAP */ dout("rmsnap dir %p '%.*s' dn %p ", dir, dentry->d_name.len, dentry->d_name.name, dentry); op = CEPH_MDS_OP_RMSNAP; } else if (ceph_snap(dir) == CEPH_NOSNAP) { dout("unlink/rmdir dir %p dn %p inode %p ", dir, dentry, inode); |
dba19c606 get rid of open-c... |
889 |
op = S_ISDIR(dentry->d_inode->i_mode) ? |
2817b000b ceph: directory o... |
890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 |
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; } else goto out; req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; } req->r_dentry = dget(dentry); req->r_num_caps = 2; req->r_locked_dir = dir; req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; req->r_inode_drop = drop_caps_for_unlink(inode); err = ceph_mdsc_do_request(mdsc, dir, req); if (!err && !req->r_reply_info.head->is_dentry) d_delete(dentry); ceph_mdsc_put_request(req); out: return err; } static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { |
3d14c5d2b ceph: factor out ... |
915 916 |
struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb); struct ceph_mds_client *mdsc = fsc->mdsc; |
2817b000b ceph: directory o... |
917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 |
struct ceph_mds_request *req; int err; if (ceph_snap(old_dir) != ceph_snap(new_dir)) return -EXDEV; if (ceph_snap(old_dir) != CEPH_NOSNAP || ceph_snap(new_dir) != CEPH_NOSNAP) return -EROFS; dout("rename dir %p dentry %p to dir %p dentry %p ", old_dir, old_dentry, new_dir, new_dentry); req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); req->r_dentry = dget(new_dentry); req->r_num_caps = 2; req->r_old_dentry = dget(old_dentry); |
41b02e1f9 ceph: explicitly ... |
934 |
req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry); |
2817b000b ceph: directory o... |
935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 |
req->r_locked_dir = new_dir; req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL; req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; /* release LINK_RDCACHE on source inode (mds will lock it) */ req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; if (new_dentry->d_inode) req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode); err = ceph_mdsc_do_request(mdsc, old_dir, req); if (!err && !req->r_reply_info.head->is_dentry) { /* * Normally d_move() is done by fill_trace (called by * do_request, above). If there is no trace, we need * to do it here. */ |
ea1409f96 ceph: clear dir c... |
951 952 |
/* d_move screws up d_subdirs order */ |
c6ffe1001 ceph: use new D_C... |
953 |
ceph_dir_clear_complete(new_dir); |
ea1409f96 ceph: clear dir c... |
954 |
|
2817b000b ceph: directory o... |
955 |
d_move(old_dentry, new_dentry); |
ea1409f96 ceph: clear dir c... |
956 957 958 |
/* ensure target dentry is invalidated, despite rehashing bug in vfs_rename_dir */ |
81a6cf2d3 ceph: invalidate ... |
959 |
ceph_invalidate_dentry_lease(new_dentry); |
2817b000b ceph: directory o... |
960 961 962 963 |
} ceph_mdsc_put_request(req); return err; } |
81a6cf2d3 ceph: invalidate ... |
964 965 966 967 968 969 970 971 972 973 |
/* * Ensure a dentry lease will no longer revalidate. */ void ceph_invalidate_dentry_lease(struct dentry *dentry) { spin_lock(&dentry->d_lock); dentry->d_time = jiffies; ceph_dentry(dentry)->lease_shared_gen = 0; spin_unlock(&dentry->d_lock); } |
2817b000b ceph: directory o... |
974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 |
/* * Check if dentry lease is valid. If not, delete the lease. Try to * renew if the least is more than half up. */ static int dentry_lease_is_valid(struct dentry *dentry) { struct ceph_dentry_info *di; struct ceph_mds_session *s; int valid = 0; u32 gen; unsigned long ttl; struct ceph_mds_session *session = NULL; struct inode *dir = NULL; u32 seq = 0; spin_lock(&dentry->d_lock); di = ceph_dentry(dentry); |
3d8eb7a94 ceph: remove unne... |
992 |
if (di->lease_session) { |
2817b000b ceph: directory o... |
993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 |
s = di->lease_session; spin_lock(&s->s_cap_lock); gen = s->s_cap_gen; ttl = s->s_cap_ttl; spin_unlock(&s->s_cap_lock); if (di->lease_gen == gen && time_before(jiffies, dentry->d_time) && time_before(jiffies, ttl)) { valid = 1; if (di->lease_renew_after && time_after(jiffies, di->lease_renew_after)) { /* we should renew */ dir = dentry->d_parent->d_inode; session = ceph_get_mds_session(s); seq = di->lease_seq; di->lease_renew_after = 0; di->lease_renew_from = jiffies; } |
2817b000b ceph: directory o... |
1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 |
} } spin_unlock(&dentry->d_lock); if (session) { ceph_mdsc_lease_send_msg(session, dir, dentry, CEPH_MDS_LEASE_RENEW, seq); ceph_put_mds_session(session); } dout("dentry_lease_is_valid - dentry %p = %d ", dentry, valid); return valid; } /* * Check if directory-wide content lease/cap is valid. */ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) { struct ceph_inode_info *ci = ceph_inode(dir); struct ceph_dentry_info *di = ceph_dentry(dentry); int valid = 0; |
be655596b ceph: use i_ceph_... |
1034 |
spin_lock(&ci->i_ceph_lock); |
2817b000b ceph: directory o... |
1035 1036 |
if (ci->i_shared_gen == di->lease_shared_gen) valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1); |
be655596b ceph: use i_ceph_... |
1037 |
spin_unlock(&ci->i_ceph_lock); |
2817b000b ceph: directory o... |
1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 |
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d ", dir, (unsigned)ci->i_shared_gen, dentry, (unsigned)di->lease_shared_gen, valid); return valid; } /* * Check if cached dentry can be trusted. */ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) { |
bf1c6aca9 ceph: protect d_p... |
1050 |
int valid = 0; |
34286d666 fs: rcu-walk awar... |
1051 |
struct inode *dir; |
0eb980e31 ceph: fix d_reval... |
1052 |
if (nd && nd->flags & LOOKUP_RCU) |
34286d666 fs: rcu-walk awar... |
1053 |
return -ECHILD; |
1cd3935be ceph: set dn offs... |
1054 1055 1056 1057 |
dout("d_revalidate %p '%.*s' inode %p offset %lld ", dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode, ceph_dentry(dentry)->offset); |
2817b000b ceph: directory o... |
1058 |
|
bf1c6aca9 ceph: protect d_p... |
1059 |
dir = ceph_get_dentry_parent_inode(dentry); |
2817b000b ceph: directory o... |
1060 1061 1062 1063 1064 |
/* always trust cached snapped dentries, snapdir dentry */ if (ceph_snap(dir) != CEPH_NOSNAP) { dout("d_revalidate %p '%.*s' inode %p is SNAPPED ", dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); |
bf1c6aca9 ceph: protect d_p... |
1065 1066 1067 1068 1069 1070 1071 |
valid = 1; } else if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) { valid = 1; } else if (dentry_lease_is_valid(dentry) || dir_lease_is_valid(dir, dentry)) { valid = 1; |
2817b000b ceph: directory o... |
1072 |
} |
2817b000b ceph: directory o... |
1073 |
|
bf1c6aca9 ceph: protect d_p... |
1074 1075 1076 1077 1078 1079 1080 1081 |
dout("d_revalidate %p %s ", dentry, valid ? "valid" : "invalid"); if (valid) ceph_dentry_lru_touch(dentry); else d_drop(dentry); iput(dir); return valid; |
2817b000b ceph: directory o... |
1082 1083 1084 |
} /* |
147851d2d ceph: rename dent... |
1085 |
* Release our ceph_dentry_info. |
2817b000b ceph: directory o... |
1086 |
*/ |
147851d2d ceph: rename dent... |
1087 |
static void ceph_d_release(struct dentry *dentry) |
2817b000b ceph: directory o... |
1088 1089 |
{ struct ceph_dentry_info *di = ceph_dentry(dentry); |
2817b000b ceph: directory o... |
1090 |
|
147851d2d ceph: rename dent... |
1091 1092 |
dout("d_release %p ", dentry); |
3d8eb7a94 ceph: remove unne... |
1093 1094 1095 1096 1097 |
ceph_dentry_lru_del(dentry); if (di->lease_session) ceph_put_mds_session(di->lease_session); kmem_cache_free(ceph_dentry_cachep, di); dentry->d_fsdata = NULL; |
2817b000b ceph: directory o... |
1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 |
} static int ceph_snapdir_d_revalidate(struct dentry *dentry, struct nameidata *nd) { /* * Eventually, we'll want to revalidate snapped metadata * too... probably... */ return 1; } |
b58dc4100 ceph: clear paren... |
1109 |
/* |
c6ffe1001 ceph: use new D_C... |
1110 1111 |
* Set/clear/test dir complete flag on the dir's dentry. */ |
c6ffe1001 ceph: use new D_C... |
1112 1113 |
void ceph_dir_set_complete(struct inode *inode) { |
a40dc6cc2 ceph: enable/disa... |
1114 1115 1116 1117 1118 1119 1120 1121 1122 |
struct dentry *dentry = d_find_any_alias(inode); if (dentry && ceph_dentry(dentry) && ceph_test_mount_opt(ceph_sb_to_client(dentry->d_sb), DCACHE)) { dout(" marking %p (%p) complete ", inode, dentry); set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); } dput(dentry); |
c6ffe1001 ceph: use new D_C... |
1123 1124 1125 1126 |
} void ceph_dir_clear_complete(struct inode *inode) { |
a40dc6cc2 ceph: enable/disa... |
1127 1128 1129 1130 1131 1132 1133 1134 |
struct dentry *dentry = d_find_any_alias(inode); if (dentry && ceph_dentry(dentry)) { dout(" marking %p (%p) complete ", inode, dentry); set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); } dput(dentry); |
c6ffe1001 ceph: use new D_C... |
1135 1136 1137 1138 |
} bool ceph_dir_test_complete(struct inode *inode) { |
a40dc6cc2 ceph: enable/disa... |
1139 1140 1141 1142 1143 1144 1145 1146 |
struct dentry *dentry = d_find_any_alias(inode); if (dentry && ceph_dentry(dentry)) { dout(" marking %p (%p) NOT complete ", inode, dentry); clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); } dput(dentry); |
c6ffe1001 ceph: use new D_C... |
1147 1148 1149 1150 |
return false; } /* |
b58dc4100 ceph: clear paren... |
1151 1152 1153 1154 1155 1156 1157 1158 |
* When the VFS prunes a dentry from the cache, we need to clear the * complete flag on the parent directory. * * Called under dentry->d_lock. */ static void ceph_d_prune(struct dentry *dentry) { struct ceph_dentry_info *di; |
774ac21da ceph: initialize ... |
1159 1160 |
dout("ceph_d_prune %p ", dentry); |
b58dc4100 ceph: clear paren... |
1161 1162 1163 1164 1165 1166 1167 1168 |
/* do we have a valid parent? */ if (!dentry->d_parent || IS_ROOT(dentry)) return; /* if we are not hashed, we don't affect D_COMPLETE */ if (d_unhashed(dentry)) return; |
2817b000b ceph: directory o... |
1169 |
|
b58dc4100 ceph: clear paren... |
1170 1171 1172 1173 1174 1175 1176 |
/* * we hold d_lock, so d_parent is stable, and d_fsdata is never * cleared until d_release */ di = ceph_dentry(dentry->d_parent); clear_bit(CEPH_D_COMPLETE, &di->flags); } |
2817b000b ceph: directory o... |
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 |
/* * read() on a dir. This weird interface hack only works if mounted * with '-o dirstat'. */ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct ceph_file_info *cf = file->private_data; struct inode *inode = file->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); int left; |
ae5980830 ceph: use snprint... |
1189 |
const int bufsize = 1024; |
2817b000b ceph: directory o... |
1190 |
|
3d14c5d2b ceph: factor out ... |
1191 |
if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) |
2817b000b ceph: directory o... |
1192 1193 1194 |
return -EISDIR; if (!cf->dir_info) { |
ae5980830 ceph: use snprint... |
1195 |
cf->dir_info = kmalloc(bufsize, GFP_NOFS); |
2817b000b ceph: directory o... |
1196 1197 1198 |
if (!cf->dir_info) return -ENOMEM; cf->dir_info_len = |
ae5980830 ceph: use snprint... |
1199 |
snprintf(cf->dir_info, bufsize, |
2817b000b ceph: directory o... |
1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 |
"entries: %20lld " " files: %20lld " " subdirs: %20lld " "rentries: %20lld " " rfiles: %20lld " " rsubdirs: %20lld " "rbytes: %20lld " "rctime: %10ld.%09ld ", ci->i_files + ci->i_subdirs, ci->i_files, ci->i_subdirs, ci->i_rfiles + ci->i_rsubdirs, ci->i_rfiles, ci->i_rsubdirs, ci->i_rbytes, (long)ci->i_rctime.tv_sec, (long)ci->i_rctime.tv_nsec); } if (*ppos >= cf->dir_info_len) return 0; size = min_t(unsigned, size, cf->dir_info_len-*ppos); left = copy_to_user(buf, cf->dir_info + *ppos, size); if (left == size) return -EFAULT; *ppos += (size - left); return size - left; } /* * an fsync() on a dir will wait for any uncommitted directory * operations to commit. */ |
02c24a821 fs: push i_mutex ... |
1241 1242 |
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) |
2817b000b ceph: directory o... |
1243 |
{ |
7ea808591 drop unused dentr... |
1244 |
struct inode *inode = file->f_path.dentry->d_inode; |
2817b000b ceph: directory o... |
1245 1246 1247 1248 1249 1250 1251 1252 |
struct ceph_inode_info *ci = ceph_inode(inode); struct list_head *head = &ci->i_unsafe_dirops; struct ceph_mds_request *req; u64 last_tid; int ret = 0; dout("dir_fsync %p ", inode); |
02c24a821 fs: push i_mutex ... |
1253 1254 1255 1256 |
ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret) return ret; mutex_lock(&inode->i_mutex); |
2817b000b ceph: directory o... |
1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 |
spin_lock(&ci->i_unsafe_lock); if (list_empty(head)) goto out; req = list_entry(head->prev, struct ceph_mds_request, r_unsafe_dir_item); last_tid = req->r_tid; do { ceph_mdsc_get_request(req); spin_unlock(&ci->i_unsafe_lock); |
2ff179e65 ceph: avoid iput(... |
1268 |
|
2817b000b ceph: directory o... |
1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 |
dout("dir_fsync %p wait on tid %llu (until %llu) ", inode, req->r_tid, last_tid); if (req->r_timeout) { ret = wait_for_completion_timeout( &req->r_safe_completion, req->r_timeout); if (ret > 0) ret = 0; else if (ret == 0) ret = -EIO; /* timed out */ } else { wait_for_completion(&req->r_safe_completion); } |
2817b000b ceph: directory o... |
1282 |
ceph_mdsc_put_request(req); |
2ff179e65 ceph: avoid iput(... |
1283 |
spin_lock(&ci->i_unsafe_lock); |
2817b000b ceph: directory o... |
1284 1285 1286 1287 1288 1289 1290 |
if (ret || list_empty(head)) break; req = list_entry(head->next, struct ceph_mds_request, r_unsafe_dir_item); } while (req->r_tid < last_tid); out: spin_unlock(&ci->i_unsafe_lock); |
02c24a821 fs: push i_mutex ... |
1291 |
mutex_unlock(&inode->i_mutex); |
2817b000b ceph: directory o... |
1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 |
return ret; } /* * We maintain a private dentry LRU. * * FIXME: this needs to be changed to a per-mds lru to be useful. */ void ceph_dentry_lru_add(struct dentry *dn) { struct ceph_dentry_info *di = ceph_dentry(dn); struct ceph_mds_client *mdsc; |
2817b000b ceph: directory o... |
1304 |
|
04a419f90 ceph: add feature... |
1305 1306 1307 |
dout("dentry_lru_add %p %p '%.*s' ", di, dn, dn->d_name.len, dn->d_name.name); |
3d8eb7a94 ceph: remove unne... |
1308 1309 1310 1311 1312 |
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; spin_lock(&mdsc->dentry_lru_lock); list_add_tail(&di->lru, &mdsc->dentry_lru); mdsc->num_dentry++; spin_unlock(&mdsc->dentry_lru_lock); |
2817b000b ceph: directory o... |
1313 1314 1315 1316 1317 1318 |
} void ceph_dentry_lru_touch(struct dentry *dn) { struct ceph_dentry_info *di = ceph_dentry(dn); struct ceph_mds_client *mdsc; |
2817b000b ceph: directory o... |
1319 |
|
1cd3935be ceph: set dn offs... |
1320 1321 1322 |
dout("dentry_lru_touch %p %p '%.*s' (offset %lld) ", di, dn, dn->d_name.len, dn->d_name.name, di->offset); |
3d8eb7a94 ceph: remove unne... |
1323 1324 1325 1326 |
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; spin_lock(&mdsc->dentry_lru_lock); list_move_tail(&di->lru, &mdsc->dentry_lru); spin_unlock(&mdsc->dentry_lru_lock); |
2817b000b ceph: directory o... |
1327 1328 1329 1330 1331 1332 |
} void ceph_dentry_lru_del(struct dentry *dn) { struct ceph_dentry_info *di = ceph_dentry(dn); struct ceph_mds_client *mdsc; |
04a419f90 ceph: add feature... |
1333 1334 1335 |
dout("dentry_lru_del %p %p '%.*s' ", di, dn, dn->d_name.len, dn->d_name.name); |
3d8eb7a94 ceph: remove unne... |
1336 1337 1338 1339 1340 |
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; spin_lock(&mdsc->dentry_lru_lock); list_del_init(&di->lru); mdsc->num_dentry--; spin_unlock(&mdsc->dentry_lru_lock); |
2817b000b ceph: directory o... |
1341 |
} |
6c0f3af72 ceph: add dir_lay... |
1342 1343 1344 1345 |
/* * Return name hash for a given dentry. This is dependent on * the parent directory's hash function. */ |
e5f86dc37 ceph: avoid d_par... |
1346 |
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) |
6c0f3af72 ceph: add dir_lay... |
1347 |
{ |
6c0f3af72 ceph: add dir_lay... |
1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 |
struct ceph_inode_info *dci = ceph_inode(dir); switch (dci->i_dir_layout.dl_dir_hash) { case 0: /* for backward compat */ case CEPH_STR_HASH_LINUX: return dn->d_name.hash; default: return ceph_str_hash(dci->i_dir_layout.dl_dir_hash, dn->d_name.name, dn->d_name.len); } } |
2817b000b ceph: directory o... |
1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 |
const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, .readdir = ceph_readdir, .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, .unlocked_ioctl = ceph_ioctl, .fsync = ceph_dir_fsync, }; const struct inode_operations ceph_dir_iops = { .lookup = ceph_lookup, .permission = ceph_permission, .getattr = ceph_getattr, .setattr = ceph_setattr, .setxattr = ceph_setxattr, .getxattr = ceph_getxattr, .listxattr = ceph_listxattr, .removexattr = ceph_removexattr, .mknod = ceph_mknod, .symlink = ceph_symlink, .mkdir = ceph_mkdir, .link = ceph_link, .unlink = ceph_unlink, .rmdir = ceph_unlink, .rename = ceph_rename, .create = ceph_create, }; |
52dfb8ac0 ceph: constify de... |
1388 |
const struct dentry_operations ceph_dentry_ops = { |
2817b000b ceph: directory o... |
1389 |
.d_revalidate = ceph_d_revalidate, |
147851d2d ceph: rename dent... |
1390 |
.d_release = ceph_d_release, |
b58dc4100 ceph: clear paren... |
1391 |
.d_prune = ceph_d_prune, |
2817b000b ceph: directory o... |
1392 |
}; |
52dfb8ac0 ceph: constify de... |
1393 |
const struct dentry_operations ceph_snapdir_dentry_ops = { |
2817b000b ceph: directory o... |
1394 |
.d_revalidate = ceph_snapdir_d_revalidate, |
147851d2d ceph: rename dent... |
1395 |
.d_release = ceph_d_release, |
2817b000b ceph: directory o... |
1396 |
}; |
52dfb8ac0 ceph: constify de... |
1397 |
const struct dentry_operations ceph_snap_dentry_ops = { |
147851d2d ceph: rename dent... |
1398 |
.d_release = ceph_d_release, |
b58dc4100 ceph: clear paren... |
1399 |
.d_prune = ceph_d_prune, |
2817b000b ceph: directory o... |
1400 |
}; |