fs/ceph/mds_client.c
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
struct ceph_reconnect_state {
	struct ceph_pagelist *pagelist;
	bool flock;
};
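/*
 * Note on ceph_reconnect_state: the pagelist accumulates the encoded
 * reconnect payload as caps are walked, and "flock" appears to select
 * the newer (v2) reconnect encoding that also carries file locking
 * state; the encoding itself happens later in this file, outside this
 * excerpt.
 */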
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       int features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;
	return 0;
bad:
	return err;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  int features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				int features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
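	/*
	 * The four per-entry arrays filled in below (inode info, dentry
	 * name pointers, name lengths, and lease pointers) are carved
	 * out of a single allocation sized for all of them, with dir_in
	 * at the base.
	 */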
	if (num == 0)
		goto done;

	/* alloc large array */
	info->dir_nr = num;
	info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
			       sizeof(*info->dir_dname) +
			       sizeof(*info->dir_dname_len) +
			       sizeof(*info->dir_dlease),
			       GFP_NOFS);
	if (info->dir_in == NULL) {
		err = -ENOMEM;
		goto out_bad;
	}
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);

	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     int features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  int features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else
		return parse_reply_info_dir(p, end, info, features);
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    int features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;
	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	kfree(info->dir_in);
}


/*
 * sessions
 */
static const char *session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_authorizer)
			s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
				s->s_mdsc->fsc->client->monc.auth,
				s->s_authorizer);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}
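/*
 * Note: mdsc->sessions[] is a flat array indexed by MDS rank, grown on
 * demand in register_session() below to the next power of two above
 * the largest rank seen, which is why the helpers above and below all
 * bound-check against mdsc->max_sessions.
 */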
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
	s->s_con.private = s;
	s->s_con.ops = &mds_con_ops;
	s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
	s->s_con.peer_name.num = cpu_to_le64(mds);

	spin_lock_init(&s->s_cap_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = 0;
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_releases_done);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
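/*
 * Tear down a request once its last reference is dropped (invoked via
 * kref_put()): release the message refs, the CEPH_CAP_PIN references
 * taken on the inodes involved, and the dentry/inode references
 * themselves.
 */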
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_put(req->r_reply);
		destroy_reply_info(&req->r_reply_info);
	}
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_target_inode)
		iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		dput(req->r_old_dentry);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mds_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else {
			ceph_mdsc_get_request(req);
			return req;
		}
	}
	return NULL;
}

static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
{
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mds_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
}

/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		spin_lock(&ci->i_unsafe_lock);
		req->r_unsafe_dir = dir;
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);

		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	ceph_mdsc_put_request(req);
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}

static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = parent->d_inode;

		if (dir->i_sb != mdsc->fsc->sb) {
			/* not this fs! */
			inode = req->r_dentry->d_inode;
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			inode = dn->d_inode;
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else if (req->r_dentry->d_inode) {
			/* dentry target */
			inode = req->r_dentry->d_inode;
		} else {
			/* dir + name */
			inode = dir;
			hash = ceph_dentry_hash(dir, req->r_dentry);
			is_hash = true;
		}
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}
		}
	}
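	/*
	 * No usable hint from the fragtree: fall back to whichever MDS
	 * holds a capability for this inode (preferring the auth cap),
	 * and finally to a random MDS if there is none.
	 */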
	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}


/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);
	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;
	int target;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;
	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		target = mi->export_targets[i];
		ts = __ceph_lookup_mds_session(mdsc, target);
		if (!ts) {
			ts = register_session(mdsc, target);
			if (IS_ERR(ts))
				return;
		}
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		else
			dout(" mds%d target mds%d %p is %s\n", session->s_mds,
			     i, ts, session_state_name(ts->s_state));
		ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
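/*
 * An MDS's export targets are the ranks it may migrate subtrees (and
 * therefore our capabilities) to; opening sessions to them in advance
 * presumably avoids stalling on session setup when such a migration
 * happens.
 */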
/*
 * session caps
 */

/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
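	/*
	 * last_inode and old_cap carry references that can only be
	 * dropped after s_cap_lock is released; s_cap_iterator marks
	 * our position so that a racing __ceph_remove_cap() (per the
	 * comment above) can leave the final list removal of the
	 * current cap to this loop.
	 */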
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			cap->session = NULL;
			old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	if (last_inode)
		iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap);
	if (!__ceph_is_any_real_caps(ci)) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&ci->i_dirty_item)) {
			pr_info(" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_info(" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		if (drop && ci->i_wrbuffer_ref) {
			pr_info(" dropping dirty data for %p %lld\n",
				inode, ceph_ino(inode));
			ci->i_wrbuffer_ref = 0;
			ci->i_wrbuffer_ref_head = 0;
			drop++;
		}
		spin_unlock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&ci->i_ceph_lock);
	while (drop--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);
	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	cleanup_cap_releases(session);
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);
	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	return 0;
}
static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
		ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && (session->s_cap_ttl == 0 ||
				 time_after_eq(jiffies, session->s_cap_ttl));

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used));
	if (ci->i_dirty_caps)
		goto out;	/* dirty caps */
	if ((used & ~oissued) & mine)
		goto out;	/* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap);
	} else {
		/* try to drop referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}
	return 0;
}

/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg, *partial = NULL;
	struct ceph_mds_cap_release *head;
	int err = -ENOMEM;
	int extra = mdsc->fsc->mount_options->cap_release_safety;
	int num;

	dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
	     extra);

	spin_lock(&session->s_cap_lock);

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		if (num) {
			dout(" partial %p with (%d/%d)\n", msg, num,
			     (int)CEPH_CAPS_PER_RELEASE);
			extra += CEPH_CAPS_PER_RELEASE - num;
			partial = msg;
		}
	}

	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   GFP_NOFS, false);
		if (!msg)
			goto out_unlocked;
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
	}

	if (partial) {
		head = partial->front.iov_base;
		num = le32_to_cpu(head->num);
		dout(" queueing partial %p with %d/%d\n", partial, num,
		     (int)CEPH_CAPS_PER_RELEASE);
		list_move_tail(&partial->list_head,
			       &session->s_cap_releases_done);
		session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
	}
	err = 0;
	spin_unlock(&session->s_cap_lock);
out_unlocked:
	return err;
}

/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
	int mds, ret = 1;

	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		if (!session)
			continue;
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
					   i_flushing_item);
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&ci->i_ceph_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
				ret = 0;
			}
			spin_unlock(&ci->i_ceph_lock);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (!ret)
			return ret;
		mutex_lock(&mdsc->mutex);
	}

	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
	return ret;
}

/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		spin_unlock(&session->s_cap_lock);
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
		spin_lock(&session->s_cap_lock);
	}
	spin_unlock(&session->s_cap_lock);
}

static void discard_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	unsigned num;

	dout("discard_cap_releases mds%d\n", session->s_mds);
	spin_lock(&session->s_cap_lock);

	/* zero out the in-progress message */
	msg = list_first_entry(&session->s_cap_releases,
			       struct ceph_msg, list_head);
	head = msg->front.iov_base;
	num = le32_to_cpu(head->num);
	dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
	head->num = cpu_to_le32(0);
	session->s_num_cap_releases += num;

	/* requeue completed messages */
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);

		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
		     num);
		session->s_num_cap_releases += num;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		list_add(&msg->list_head, &session->s_cap_releases);
	}

	spin_unlock(&session->s_cap_lock);
}
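/*
 * To summarize the two helpers above: ceph_send_cap_releases() pushes
 * the fully built release messages on s_cap_releases_done out to the
 * MDS, while discard_cap_releases() zeroes them and re-credits
 * s_num_cap_releases, presumably so the releases can be regenerated
 * when a session is reset rather than sent over a stale connection.
 */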
/*
 * requests
 */

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}
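/*
 * Rough usage sketch (the submit helper lives elsewhere in this file
 * and is not shown in this excerpt):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);	// or set r_inode / r_path1
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);	// drops the kref from create
 */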
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req = __get_oldest_req(mdsc);

	if (req)
		return req->r_tid;
	return 0;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
		if (temp == NULL) {
			rcu_read_unlock();
			pr_err("build_path corrupt dentry %p\n", dentry);
			return ERR_PTR(-EINVAL);
		}
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
		if (temp == NULL) {
			rcu_read_unlock();
			pr_err("build_path corrupt dentry\n");
			kfree(path);
			return ERR_PTR(-EINVAL);
		}
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(temp->d_inode);
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, dentry->d_count, *base, len, path);
	return path;
}

static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(dentry->d_parent->d_inode);
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = strlen(rpath);
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;
b61c27636 libceph: don't co... |
1688 |
msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f26 ceph: make ceph_m... |
1689 1690 |
if (!msg) { msg = ERR_PTR(-ENOMEM); |
2f2dc0534 ceph: MDS client |
1691 |
goto out_free2; |
a79832f26 ceph: make ceph_m... |
1692 |
} |
2f2dc0534 ceph: MDS client |
1693 |
|
6df058c02 ceph: include tra... |
1694 |
msg->hdr.tid = cpu_to_le64(req->r_tid); |
2f2dc0534 ceph: MDS client |
1695 1696 1697 1698 1699 1700 |
head = msg->front.iov_base; p = msg->front.iov_base + sizeof(*head); end = msg->front.iov_base + msg->front.iov_len; head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); head->op = cpu_to_le32(req->r_op); |
cb4276cca ceph: fix uid/gid... |
1701 1702 |
head->caller_uid = cpu_to_le32(req->r_uid); head->caller_gid = cpu_to_le32(req->r_gid); |
2f2dc0534 ceph: MDS client |
1703 1704 1705 1706 |
head->args = req->r_args; ceph_encode_filepath(&p, end, ino1, path1); ceph_encode_filepath(&p, end, ino2, path2); |
e979cf503 ceph: do not incl... |
1707 1708 |
/* make note of release offset, in case we need to replay */ req->r_request_release_offset = p - msg->front.iov_base; |
2f2dc0534 ceph: MDS client |
1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 |
/* cap releases */ releases = 0; if (req->r_inode_drop) releases += ceph_encode_inode_release(&p, req->r_inode ? req->r_inode : req->r_dentry->d_inode, mds, req->r_inode_drop, req->r_inode_unless, 0); if (req->r_dentry_drop) releases += ceph_encode_dentry_release(&p, req->r_dentry, mds, req->r_dentry_drop, req->r_dentry_unless); if (req->r_old_dentry_drop) releases += ceph_encode_dentry_release(&p, req->r_old_dentry, mds, req->r_old_dentry_drop, req->r_old_dentry_unless); if (req->r_old_inode_drop) releases += ceph_encode_inode_release(&p, req->r_old_dentry->d_inode, mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); head->num_releases = cpu_to_le16(releases); BUG_ON(p > end); msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); msg->pages = req->r_pages; msg->nr_pages = req->r_num_pages; msg->hdr.data_len = cpu_to_le32(req->r_data_len); msg->hdr.data_off = cpu_to_le16(0); out_free2: if (freepath2) kfree((char *)path2); out_free1: if (freepath1) kfree((char *)path1); out: return msg; } /* * called under mdsc->mutex if error, under no mutex if * success. */ static void complete_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { if (req->r_callback) req->r_callback(mdsc, req); else |
03066f234 ceph: use complet... |
1756 |
complete_all(&req->r_completion); |
2f2dc0534 ceph: MDS client |
1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 |
} /* * called under mdsc->mutex */ static int __prepare_send_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, int mds) { struct ceph_mds_request_head *rhead; struct ceph_msg *msg; int flags = 0; |
2f2dc0534 ceph: MDS client |
1769 |
req->r_attempts++; |
e55b71f80 ceph: handle ESTA... |
1770 1771 1772 1773 1774 1775 1776 1777 1778 |
if (req->r_inode) { struct ceph_cap *cap = ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); if (cap) req->r_sent_on_mseq = cap->mseq; else req->r_sent_on_mseq = -1; } |
2f2dc0534 ceph: MDS client |
1779 1780 1781 |
dout("prepare_send_request %p tid %lld %s (attempt %d) ", req, req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); |
01a92f174 ceph: reuse reque... |
1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 |
if (req->r_got_unsafe) { /* * Replay. Do not regenerate message (and rebuild * paths, etc.); just use the original message. * Rebuilding paths will break for renames because * d_move mangles the src name. */ msg = req->r_request; rhead = msg->front.iov_base; flags = le32_to_cpu(rhead->flags); flags |= CEPH_MDS_FLAG_REPLAY; rhead->flags = cpu_to_le32(flags); if (req->r_target_inode) rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); rhead->num_retry = req->r_attempts - 1; |
e979cf503 ceph: do not incl... |
1800 1801 1802 1803 1804 |
/* remove cap/dentry releases from message */ rhead->num_releases = 0; msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset); msg->front.iov_len = req->r_request_release_offset; |
01a92f174 ceph: reuse reque... |
1805 1806 |
return 0; } |
2f2dc0534 ceph: MDS client |
1807 1808 1809 1810 1811 1812 |
if (req->r_request) { ceph_msg_put(req->r_request); req->r_request = NULL; } msg = create_request_message(mdsc, req, mds); if (IS_ERR(msg)) { |
e1518c7c0 ceph: clean up md... |
1813 |
req->r_err = PTR_ERR(msg); |
2f2dc0534 ceph: MDS client |
1814 |
complete_request(mdsc, req); |
a79832f26 ceph: make ceph_m... |
1815 |
return PTR_ERR(msg); |
2f2dc0534 ceph: MDS client |
1816 1817 1818 1819 |
} req->r_request = msg; rhead = msg->front.iov_base; |
2f2dc0534 ceph: MDS client |
1820 1821 1822 1823 1824 1825 1826 1827 |
rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); if (req->r_got_unsafe) flags |= CEPH_MDS_FLAG_REPLAY; if (req->r_locked_dir) flags |= CEPH_MDS_FLAG_WANT_DENTRY; rhead->flags = cpu_to_le32(flags); rhead->num_fwd = req->r_num_fwd; rhead->num_retry = req->r_attempts - 1; |
01a92f174 ceph: reuse reque... |
1828 |
rhead->ino = 0; |
2f2dc0534 ceph: MDS client |
1829 1830 1831 |
dout(" r_locked_dir = %p ", req->r_locked_dir); |
2f2dc0534 ceph: MDS client |
1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 |
return 0; } /* * send request, or put it on the appropriate wait list. */ static int __do_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { struct ceph_mds_session *session = NULL; int mds = -1; int err = -EAGAIN; |
e1518c7c0 ceph: clean up md... |
1844 |
if (req->r_err || req->r_got_result) |
2f2dc0534 ceph: MDS client |
1845 1846 1847 1848 1849 1850 1851 1852 1853 |
goto out; if (req->r_timeout && time_after_eq(jiffies, req->r_started + req->r_timeout)) { dout("do_request timed out "); err = -EIO; goto finish; } |
dc69e2e9f ceph: associate r... |
1854 |
put_request_session(req); |
2f2dc0534 ceph: MDS client |
1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 |
mds = __choose_mds(mdsc, req); if (mds < 0 || ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { dout("do_request no mds or not active, waiting for map "); list_add(&req->r_wait, &mdsc->waiting_for_map); goto out; } /* get, open session */ session = __ceph_lookup_mds_session(mdsc, mds); |
9c423956b ceph: propagate m... |
1866 |
if (!session) { |
2f2dc0534 ceph: MDS client |
1867 |
session = register_session(mdsc, mds); |
9c423956b ceph: propagate m... |
1868 1869 1870 1871 1872 |
if (IS_ERR(session)) { err = PTR_ERR(session); goto finish; } } |
dc69e2e9f ceph: associate r... |
1873 |
req->r_session = get_session(session); |
2f2dc0534 ceph: MDS client |
1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 |
dout("do_request mds%d session %p state %s ", mds, session, session_state_name(session->s_state)); if (session->s_state != CEPH_MDS_SESSION_OPEN && session->s_state != CEPH_MDS_SESSION_HUNG) { if (session->s_state == CEPH_MDS_SESSION_NEW || session->s_state == CEPH_MDS_SESSION_CLOSING) __open_session(mdsc, session); list_add(&req->r_wait, &session->s_waiting); goto out_session; } /* send request */ |
2f2dc0534 ceph: MDS client |
1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 |
req->r_resend_mds = -1; /* forget any previous mds hint */ if (req->r_request_started == 0) /* note request start time */ req->r_request_started = jiffies; err = __prepare_send_request(mdsc, req, mds); if (!err) { ceph_msg_get(req->r_request); ceph_con_send(&session->s_con, req->r_request); } out_session: ceph_put_mds_session(session); out: return err; finish: |
e1518c7c0 ceph: clean up md... |
1904 |
req->r_err = err; |
2f2dc0534 ceph: MDS client |
1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 |
complete_request(mdsc, req); goto out; } /* * called under mdsc->mutex */ static void __wake_requests(struct ceph_mds_client *mdsc, struct list_head *head) { struct ceph_mds_request *req, *nreq; list_for_each_entry_safe(req, nreq, head, r_wait) { list_del_init(&req->r_wait); __do_request(mdsc, req); } } /* * Wake up threads with requests pending for @mds, so that they can |
29790f26a ceph: wait for md... |
1925 |
* resubmit their requests to a possibly different mds. |
2f2dc0534 ceph: MDS client |
1926 |
*/ |
29790f26a ceph: wait for md... |
1927 |
static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc0534 ceph: MDS client |
1928 |
{ |
44ca18f26 ceph: use rbtree ... |
1929 1930 |
struct ceph_mds_request *req; struct rb_node *p; |
2f2dc0534 ceph: MDS client |
1931 1932 1933 |
dout("kick_requests mds%d ", mds); |
44ca18f26 ceph: use rbtree ... |
1934 1935 1936 1937 1938 1939 1940 1941 |
for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) { req = rb_entry(p, struct ceph_mds_request, r_node); if (req->r_got_unsafe) continue; if (req->r_session && req->r_session->s_mds == mds) { dout(" kicking tid %llu ", req->r_tid); |
44ca18f26 ceph: use rbtree ... |
1942 |
__do_request(mdsc, req); |
2f2dc0534 ceph: MDS client |
1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 |
} } } void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { dout("submit_request on %p ", req); mutex_lock(&mdsc->mutex); __register_request(mdsc, req, NULL); __do_request(mdsc, req); mutex_unlock(&mdsc->mutex); } /* * Synchronously perform an mds request. Take care of all of the * session setup, forwarding, retry details. */ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, struct inode *dir, struct ceph_mds_request *req) { int err; dout("do_request on %p ", req); /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ if (req->r_inode) ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); if (req->r_locked_dir) ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); if (req->r_old_dentry)
41b02e1f9 ceph: explicitly ... |
1977 1978 |
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), CEPH_CAP_PIN); |
2f2dc0534 ceph: MDS client |
1979 1980 1981 1982 1983 |
/* issue */ mutex_lock(&mdsc->mutex); __register_request(mdsc, req, dir); __do_request(mdsc, req); |
e1518c7c0 ceph: clean up md... |
1984 1985 1986 1987 1988 1989 |
if (req->r_err) { err = req->r_err; __unregister_request(mdsc, req); dout("do_request early error %d ", err); goto out; |
2f2dc0534 ceph: MDS client |
1990 |
} |
e1518c7c0 ceph: clean up md... |
1991 1992 1993 1994 1995 |
/* wait */ mutex_unlock(&mdsc->mutex); dout("do_request waiting "); if (req->r_timeout) { |
aa91647c8 ceph: make mds re... |
1996 |
err = (long)wait_for_completion_killable_timeout( |
e1518c7c0 ceph: clean up md... |
1997 1998 1999 2000 |
&req->r_completion, req->r_timeout); if (err == 0) err = -EIO; } else { |
aa91647c8 ceph: make mds re... |
2001 |
err = wait_for_completion_killable(&req->r_completion); |
e1518c7c0 ceph: clean up md... |
2002 2003 2004 2005 |
} dout("do_request waited, got %d ", err); mutex_lock(&mdsc->mutex); |
5b1daecd5 ceph: properly ha... |
2006 |
|
e1518c7c0 ceph: clean up md... |
2007 2008 2009 2010 2011 2012 |
/* only abort if we didn't race with a real reply */ if (req->r_got_result) { err = le32_to_cpu(req->r_reply_info.head->result); } else if (err < 0) { dout("aborted request %lld with %d ", req->r_tid, err); |
b4556396f ceph: fix race be... |
2013 2014 2015 2016 2017 2018 2019 |
/* * ensure we aren't running concurrently with * ceph_fill_trace or ceph_readdir_prepopulate, which * rely on locks (dir mutex) held by our caller. */ mutex_lock(&req->r_fill_mutex); |
e1518c7c0 ceph: clean up md... |
2020 2021 |
req->r_err = err; req->r_aborted = true; |
b4556396f ceph: fix race be... |
2022 |
mutex_unlock(&req->r_fill_mutex); |
5b1daecd5 ceph: properly ha... |
2023 |
|
e1518c7c0 ceph: clean up md... |
2024 |
if (req->r_locked_dir && |
167c9e352 ceph: use common ... |
2025 2026 |
(req->r_op & CEPH_MDS_OP_WRITE)) ceph_invalidate_dir_request(req); |
2f2dc0534 ceph: MDS client |
2027 |
} else { |
e1518c7c0 ceph: clean up md... |
2028 |
err = req->r_err; |
2f2dc0534 ceph: MDS client |
2029 |
} |
2f2dc0534 ceph: MDS client |
2030 |
|
e1518c7c0 ceph: clean up md... |
2031 2032 |
out: mutex_unlock(&mdsc->mutex); |
2f2dc0534 ceph: MDS client |
2033 2034 2035 2036 2037 2038 |
dout("do_request %p done, result %d ", req, err); return err; } /* |
c6ffe1001 ceph: use new D_C... |
2039 |
/* * Invalidate dir D_COMPLETE, dentry lease state on an aborted MDS
167c9e352 ceph: use common ... |
2040 2041 2042 2043 2044 2045 |
* namespace request. */ void ceph_invalidate_dir_request(struct ceph_mds_request *req) { struct inode *inode = req->r_locked_dir; struct ceph_inode_info *ci = ceph_inode(inode); |
c6ffe1001 ceph: use new D_C... |
2046 2047 |
dout("invalidate_dir_request %p (D_COMPLETE, lease(s)) ", inode); |
be655596b ceph: use i_ceph_... |
2048 |
spin_lock(&ci->i_ceph_lock); |
c6ffe1001 ceph: use new D_C... |
2049 |
ceph_dir_clear_complete(inode); |
167c9e352 ceph: use common ... |
2050 |
ci->i_release_count++; |
be655596b ceph: use i_ceph_... |
2051 |
spin_unlock(&ci->i_ceph_lock); |
167c9e352 ceph: use common ... |
2052 2053 2054 2055 2056 2057 2058 2059 |
if (req->r_dentry) ceph_invalidate_dentry_lease(req->r_dentry); if (req->r_old_dentry) ceph_invalidate_dentry_lease(req->r_old_dentry); } /* |
2f2dc0534 ceph: MDS client |
2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 |
* Handle mds reply. * * We take the session mutex and parse and process the reply immediately. * This preserves the logical ordering of replies, capabilities, etc., sent * by the MDS as they are applied to our local cache. */ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) { struct ceph_mds_client *mdsc = session->s_mdsc; struct ceph_mds_request *req; struct ceph_mds_reply_head *head = msg->front.iov_base; struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ u64 tid; int err, result; |
2600d2dd5 ceph: drop messag... |
2074 |
int mds = session->s_mds; |
2f2dc0534 ceph: MDS client |
2075 |
|
2f2dc0534 ceph: MDS client |
2076 2077 2078 |
if (msg->front.iov_len < sizeof(*head)) { pr_err("mdsc_handle_reply got corrupt (short) reply "); |
9ec7cab14 ceph: hex dump co... |
2079 |
ceph_msg_dump(msg); |
2f2dc0534 ceph: MDS client |
2080 2081 2082 2083 |
return; } /* get request, session */ |
6df058c02 ceph: include tra... |
2084 |
tid = le64_to_cpu(msg->hdr.tid); |
2f2dc0534 ceph: MDS client |
2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 |
mutex_lock(&mdsc->mutex); req = __lookup_request(mdsc, tid); if (!req) { dout("handle_reply on unknown tid %llu ", tid); mutex_unlock(&mdsc->mutex); return; } dout("handle_reply %p ", req); |
2f2dc0534 ceph: MDS client |
2095 2096 |
/* correct session? */ |
d96d60498 ceph: fix session... |
2097 |
if (req->r_session != session) { |
2f2dc0534 ceph: MDS client |
2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 |
pr_err("mdsc_handle_reply got %llu on session mds%d" " not mds%d ", tid, session->s_mds, req->r_session ? req->r_session->s_mds : -1); mutex_unlock(&mdsc->mutex); goto out; } /* dup? */ if ((req->r_got_unsafe && !head->safe) || (req->r_got_safe && head->safe)) { pr_warning("got a dup %s reply on %llu from mds%d ", head->safe ? "safe" : "unsafe", tid, mds); mutex_unlock(&mdsc->mutex); goto out; } |
85792d0dd ceph: cope with o... |
2115 2116 2117 2118 2119 2120 2121 |
if (req->r_got_safe && !head->safe) { pr_warning("got unsafe after safe on %llu from mds%d ", tid, mds); mutex_unlock(&mdsc->mutex); goto out; } |
2f2dc0534 ceph: MDS client |
2122 2123 2124 2125 |
result = le32_to_cpu(head->result); /* |
e55b71f80 ceph: handle ESTA... |
2126 2127 2128 2129 2130 |
* Handle an ESTALE * if we're not talking to the authority, send to them * if the authority has changed while we weren't looking, * send to new authority * Otherwise we just have to return an ESTALE |
2f2dc0534 ceph: MDS client |
2131 2132 |
*/ if (result == -ESTALE) { |
e55b71f80 ceph: handle ESTA... |
2133 |
dout("got ESTALE on request %llu", req->r_tid); |
213c99ee0 ceph: whitespace ... |
2134 2135 2136 |
if (!req->r_inode) { /* do nothing; not an authority problem */ } else if (req->r_direct_mode != USE_AUTH_MDS) { |
e55b71f80 ceph: handle ESTA... |
2137 2138 |
dout("not using auth, setting for that now"); req->r_direct_mode = USE_AUTH_MDS; |
2f2dc0534 ceph: MDS client |
2139 2140 2141 |
__do_request(mdsc, req); mutex_unlock(&mdsc->mutex); goto out; |
e55b71f80 ceph: handle ESTA... |
2142 2143 |
} else { struct ceph_inode_info *ci = ceph_inode(req->r_inode); |
4af25fdda ceph: drop redund... |
2144 2145 2146 2147 2148 |
struct ceph_cap *cap = NULL; if (req->r_session) cap = ceph_get_cap_for_mds(ci, req->r_session->s_mds); |
e55b71f80 ceph: handle ESTA... |
2149 2150 2151 2152 2153 2154 2155 2156 2157 |
dout("already using auth"); if ((!cap || cap != ci->i_auth_cap) || (cap->mseq != req->r_sent_on_mseq)) { dout("but cap changed, so resending"); __do_request(mdsc, req); mutex_unlock(&mdsc->mutex); goto out; } |
2f2dc0534 ceph: MDS client |
2158 |
} |
e55b71f80 ceph: handle ESTA... |
2159 |
dout("have to return ESTALE on request %llu", req->r_tid); |
2f2dc0534 ceph: MDS client |
2160 |
} |
e55b71f80 ceph: handle ESTA... |
2161 |
|
2f2dc0534 ceph: MDS client |
2162 2163 2164 |
if (head->safe) { req->r_got_safe = true; __unregister_request(mdsc, req); |
03066f234 ceph: use complet... |
2165 |
complete_all(&req->r_safe_completion); |
2f2dc0534 ceph: MDS client |
2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 |
if (req->r_got_unsafe) { /* * We already handled the unsafe response, now do the * cleanup. No need to examine the response; the MDS * doesn't include any result info in the safe * response. And even if it did, there is nothing * useful we could do with a revised return value. */ dout("got safe reply %llu, mds%d ", tid, mds); list_del_init(&req->r_unsafe_item); /* last unsafe request during umount? */ |
44ca18f26 ceph: use rbtree ... |
2180 |
if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f234 ceph: use complet... |
2181 |
complete_all(&mdsc->safe_umount_waiters); |
2f2dc0534 ceph: MDS client |
2182 2183 2184 |
mutex_unlock(&mdsc->mutex); goto out; } |
e1518c7c0 ceph: clean up md... |
2185 |
} else { |
2f2dc0534 ceph: MDS client |
2186 2187 2188 2189 2190 2191 2192 |
req->r_got_unsafe = true; list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); } dout("handle_reply tid %lld result %d ", tid, result); rinfo = &req->r_reply_info; |
14303d20f ceph: implement D... |
2193 |
err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc0534 ceph: MDS client |
2194 2195 2196 2197 |
mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); if (err < 0) { |
25933abdd ceph: Handle file... |
2198 2199 |
pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld) ", mds, tid); |
9ec7cab14 ceph: hex dump co... |
2200 |
ceph_msg_dump(msg); |
2f2dc0534 ceph: MDS client |
2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 |
goto out_err; } /* snap trace */ if (rinfo->snapblob_len) { down_write(&mdsc->snap_rwsem); ceph_update_snap_trace(mdsc, rinfo->snapblob, rinfo->snapblob + rinfo->snapblob_len, le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP); downgrade_write(&mdsc->snap_rwsem); } else { down_read(&mdsc->snap_rwsem); } /* insert trace into our cache */ |
b4556396f ceph: fix race be... |
2216 |
mutex_lock(&req->r_fill_mutex); |
3d14c5d2b ceph: factor out ... |
2217 |
err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); |
2f2dc0534 ceph: MDS client |
2218 |
if (err == 0) { |
25933abdd ceph: Handle file... |
2219 2220 |
if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK && rinfo->dir_nr) |
2f2dc0534 ceph: MDS client |
2221 |
ceph_readdir_prepopulate(req, req->r_session); |
37151668b ceph: do caps acc... |
2222 |
ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc0534 ceph: MDS client |
2223 |
} |
b4556396f ceph: fix race be... |
2224 |
mutex_unlock(&req->r_fill_mutex); |
2f2dc0534 ceph: MDS client |
2225 2226 2227 |
up_read(&mdsc->snap_rwsem); out_err: |
e1518c7c0 ceph: clean up md... |
2228 2229 2230 2231 2232 2233 2234 2235 2236 |
mutex_lock(&mdsc->mutex); if (!req->r_aborted) { if (err) { req->r_err = err; } else { req->r_reply = msg; ceph_msg_get(msg); req->r_got_result = true; } |
2f2dc0534 ceph: MDS client |
2237 |
} else { |
e1518c7c0 ceph: clean up md... |
2238 2239 |
dout("reply arrived after request %lld was aborted ", tid); |
2f2dc0534 ceph: MDS client |
2240 |
} |
e1518c7c0 ceph: clean up md... |
2241 |
mutex_unlock(&mdsc->mutex); |
2f2dc0534 ceph: MDS client |
2242 |
|
ee6b272b9 ceph: drop unused... |
2243 |
ceph_add_cap_releases(mdsc, req->r_session); |
2f2dc0534 ceph: MDS client |
2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 |
mutex_unlock(&session->s_mutex); /* kick calling process */ complete_request(mdsc, req); out: ceph_mdsc_put_request(req); return; } /* * handle mds notification that our request has been forwarded. */ |
2600d2dd5 ceph: drop messag... |
2258 2259 2260 |
static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg) |
2f2dc0534 ceph: MDS client |
2261 2262 |
{ struct ceph_mds_request *req; |
a1ea787c7 ceph: fix client_... |
2263 |
u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc0534 ceph: MDS client |
2264 2265 |
u32 next_mds; u32 fwd_seq; |
2f2dc0534 ceph: MDS client |
2266 2267 2268 |
int err = -EINVAL; void *p = msg->front.iov_base; void *end = p + msg->front.iov_len; |
2f2dc0534 ceph: MDS client |
2269 |
|
a1ea787c7 ceph: fix client_... |
2270 |
ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea4 ceph: convert enc... |
2271 2272 |
next_mds = ceph_decode_32(&p); fwd_seq = ceph_decode_32(&p); |
2f2dc0534 ceph: MDS client |
2273 2274 2275 2276 |
mutex_lock(&mdsc->mutex); req = __lookup_request(mdsc, tid); if (!req) { |
2a8e5e363 ceph: clean up on... |
2277 2278 |
dout("forward tid %llu to mds%d - req dne ", tid, next_mds); |
2f2dc0534 ceph: MDS client |
2279 2280 |
goto out; /* dup reply? */ } |
2a8e5e363 ceph: clean up on... |
2281 2282 2283 2284 2285 2286 2287 |
if (req->r_aborted) { dout("forward tid %llu aborted, unregistering ", tid); __unregister_request(mdsc, req); } else if (fwd_seq <= req->r_num_fwd) { dout("forward tid %llu to mds%d - old seq %d <= %d ", |
2f2dc0534 ceph: MDS client |
2288 2289 2290 |
tid, next_mds, req->r_num_fwd, fwd_seq); } else { /* resend. forward race not possible; mds would drop */ |
2a8e5e363 ceph: clean up on... |
2291 2292 2293 2294 |
dout("forward tid %llu to mds%d (we resend) ", tid, next_mds); BUG_ON(req->r_err); BUG_ON(req->r_got_result); |
2f2dc0534 ceph: MDS client |
2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 |
req->r_num_fwd = fwd_seq; req->r_resend_mds = next_mds; put_request_session(req); __do_request(mdsc, req); } ceph_mdsc_put_request(req); out: mutex_unlock(&mdsc->mutex); return; bad: pr_err("mdsc_handle_forward decode error err=%d ", err); } /* * handle a mds session control message */ static void handle_session(struct ceph_mds_session *session, struct ceph_msg *msg) { struct ceph_mds_client *mdsc = session->s_mdsc; u32 op; u64 seq; |
2600d2dd5 ceph: drop messag... |
2319 |
int mds = session->s_mds; |
2f2dc0534 ceph: MDS client |
2320 2321 |
struct ceph_mds_session_head *h = msg->front.iov_base; int wake = 0; |
2f2dc0534 ceph: MDS client |
2322 2323 2324 2325 2326 2327 2328 |
/* decode */ if (msg->front.iov_len != sizeof(*h)) goto bad; op = le32_to_cpu(h->op); seq = le64_to_cpu(h->seq); mutex_lock(&mdsc->mutex); |
2600d2dd5 ceph: drop messag... |
2329 2330 |
if (op == CEPH_SESSION_CLOSE) __unregister_session(mdsc, session); |
2f2dc0534 ceph: MDS client |
2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 |
/* FIXME: this ttl calculation is generous */ session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); dout("handle_session mds%d %s %p state %s seq %llu ", mds, ceph_session_op_name(op), session, session_state_name(session->s_state), seq); if (session->s_state == CEPH_MDS_SESSION_HUNG) { session->s_state = CEPH_MDS_SESSION_OPEN; pr_info("mds%d came back ", session->s_mds); } switch (op) { case CEPH_SESSION_OPEN: |
29790f26a ceph: wait for md... |
2350 2351 2352 |
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) pr_info("mds%d reconnect success ", session->s_mds); |
2f2dc0534 ceph: MDS client |
2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 |
session->s_state = CEPH_MDS_SESSION_OPEN; renewed_caps(mdsc, session, 0); wake = 1; if (mdsc->stopping) __close_session(mdsc, session); break; case CEPH_SESSION_RENEWCAPS: if (session->s_renew_seq == seq) renewed_caps(mdsc, session, 1); break; case CEPH_SESSION_CLOSE: |
29790f26a ceph: wait for md... |
2366 2367 2368 |
if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) pr_info("mds%d reconnect denied ", session->s_mds); |
2f2dc0534 ceph: MDS client |
2369 2370 |
remove_session_caps(session); wake = 1; /* for good measure */ |
f3c60c591 ceph: fix multipl... |
2371 |
wake_up_all(&mdsc->session_close_wq); |
29790f26a ceph: wait for md... |
2372 |
kick_requests(mdsc, mds); |
2f2dc0534 ceph: MDS client |
2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 |
break; case CEPH_SESSION_STALE: pr_info("mds%d caps went stale, renewing ", session->s_mds); spin_lock(&session->s_cap_lock); session->s_cap_gen++; session->s_cap_ttl = 0; spin_unlock(&session->s_cap_lock); send_renew_caps(mdsc, session); break; case CEPH_SESSION_RECALL_STATE: trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); break; default: pr_err("mdsc_handle_session bad op %d mds%d ", op, mds); WARN_ON(1); } mutex_unlock(&session->s_mutex); if (wake) { mutex_lock(&mdsc->mutex); __wake_requests(mdsc, &session->s_waiting); mutex_unlock(&mdsc->mutex); } return; bad: pr_err("mdsc_handle_session corrupt message mds%d len %d ", mds, (int)msg->front.iov_len); |
9ec7cab14 ceph: hex dump co... |
2408 |
ceph_msg_dump(msg); |
2f2dc0534 ceph: MDS client |
2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 |
return; } /* * called under session->mutex. */ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { struct ceph_mds_request *req, *nreq; int err; dout("replay_unsafe_requests mds%d ", session->s_mds); mutex_lock(&mdsc->mutex); list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { err = __prepare_send_request(mdsc, req, session->s_mds); if (!err) { ceph_msg_get(req->r_request); ceph_con_send(&session->s_con, req->r_request); } } mutex_unlock(&mdsc->mutex); } /* * Encode information about a cap for a reconnect with the MDS. */ |
2f2dc0534 ceph: MDS client |
2439 2440 2441 |
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { |
20cb34ae9 ceph: support v2 ... |
2442 2443 2444 2445 2446 |
union { struct ceph_mds_cap_reconnect v2; struct ceph_mds_cap_reconnect_v1 v1; } rec; size_t reclen; |
2f2dc0534 ceph: MDS client |
2447 |
struct ceph_inode_info *ci; |
20cb34ae9 ceph: support v2 ... |
2448 2449 |
struct ceph_reconnect_state *recon_state = arg; struct ceph_pagelist *pagelist = recon_state->pagelist; |
2f2dc0534 ceph: MDS client |
2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 |
char *path; int pathlen, err; u64 pathbase; struct dentry *dentry; ci = cap->ci; dout(" adding %p ino %llx.%llx cap %p %lld %s ", inode, ceph_vinop(inode), cap, cap->cap_id, ceph_cap_string(cap->issued)); |
93cea5beb ceph: use ceph_pa... |
2461 2462 2463 |
err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); if (err) return err; |
2f2dc0534 ceph: MDS client |
2464 2465 2466 2467 2468 2469 |
dentry = d_find_alias(inode); if (dentry) { path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); if (IS_ERR(path)) { err = PTR_ERR(path); |
e072f8aa3 ceph: don't BUG o... |
2470 |
goto out_dput; |
2f2dc0534 ceph: MDS client |
2471 2472 2473 2474 2475 |
} } else { path = NULL; pathlen = 0; } |
93cea5beb ceph: use ceph_pa... |
2476 2477 |
err = ceph_pagelist_encode_string(pagelist, path, pathlen); if (err) |
e072f8aa3 ceph: don't BUG o... |
2478 |
goto out_free; |
2f2dc0534 ceph: MDS client |
2479 |
|
be655596b ceph: use i_ceph_... |
2480 |
spin_lock(&ci->i_ceph_lock); |
2f2dc0534 ceph: MDS client |
2481 2482 |
cap->seq = 0; /* reset cap seq */ cap->issue_seq = 0; /* and issue_seq */ |
20cb34ae9 ceph: support v2 ... |
2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 |
if (recon_state->flock) { rec.v2.cap_id = cpu_to_le64(cap->cap_id); rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); rec.v2.issued = cpu_to_le32(cap->issued); rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); rec.v2.pathbase = cpu_to_le64(pathbase); rec.v2.flock_len = 0; reclen = sizeof(rec.v2); } else { rec.v1.cap_id = cpu_to_le64(cap->cap_id); rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); rec.v1.issued = cpu_to_le32(cap->issued); rec.v1.size = cpu_to_le64(inode->i_size); ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); rec.v1.pathbase = cpu_to_le64(pathbase); reclen = sizeof(rec.v1); } |
be655596b ceph: use i_ceph_... |
2503 |
spin_unlock(&ci->i_ceph_lock); |
2f2dc0534 ceph: MDS client |
2504 |
|
40819f6fb ceph: add flock/f... |
2505 2506 |
if (recon_state->flock) { int num_fcntl_locks, num_flock_locks; |
fca4451ac ceph: preallocate... |
2507 2508 2509 2510 |
struct ceph_pagelist_cursor trunc_point; ceph_pagelist_set_cursor(pagelist, &trunc_point); do { |
496e59553 ceph: switch from... |
2511 |
lock_flocks(); |
fca4451ac ceph: preallocate... |
2512 2513 2514 2515 2516 |
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); rec.v2.flock_len = (2*sizeof(u32) + (num_fcntl_locks+num_flock_locks) * sizeof(struct ceph_filelock)); |
496e59553 ceph: switch from... |
2517 |
unlock_flocks(); |
fca4451ac ceph: preallocate... |
2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 |
/* pre-alloc pagelist */ ceph_pagelist_truncate(pagelist, &trunc_point); err = ceph_pagelist_append(pagelist, &rec, reclen); if (!err) err = ceph_pagelist_reserve(pagelist, rec.v2.flock_len); /* encode locks */ if (!err) { |
496e59553 ceph: switch from... |
2528 |
lock_flocks(); |
fca4451ac ceph: preallocate... |
2529 2530 2531 2532 |
err = ceph_encode_locks(inode, pagelist, num_fcntl_locks, num_flock_locks); |
496e59553 ceph: switch from... |
2533 |
unlock_flocks(); |
fca4451ac ceph: preallocate... |
2534 2535 |
} } while (err == -ENOSPC); |
3612abbd5 ceph: fix reconne... |
2536 2537 |
} else { err = ceph_pagelist_append(pagelist, &rec, reclen); |
40819f6fb ceph: add flock/f... |
2538 |
} |
93cea5beb ceph: use ceph_pa... |
2539 |
|
e072f8aa3 ceph: don't BUG o... |
2540 |
out_free: |
2f2dc0534 ceph: MDS client |
2541 |
kfree(path); |
e072f8aa3 ceph: don't BUG o... |
2542 |
out_dput: |
2f2dc0534 ceph: MDS client |
2543 |
dput(dentry); |
93cea5beb ceph: use ceph_pa... |
2544 |
return err; |
2f2dc0534 ceph: MDS client |
2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 |
} /* * If an MDS fails and recovers, clients need to reconnect in order to * reestablish shared state. This includes all caps issued through * this session _and_ the snap_realm hierarchy. Because it's not * clear which snap realms the mds cares about, we send everything we * know about.. that ensures we'll then get any new info the * recovering MDS might have. * * This is a relatively heavyweight operation, but it's rare. * * called with mdsc->mutex held. */ |
34b6c855f ceph: clean up se... |
2560 2561 |
static void send_mds_reconnect(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) |
2f2dc0534 ceph: MDS client |
2562 |
{ |
2f2dc0534 ceph: MDS client |
2563 |
struct ceph_msg *reply; |
a105f00cf ceph: use rbtree ... |
2564 |
struct rb_node *p; |
34b6c855f ceph: clean up se... |
2565 |
int mds = session->s_mds; |
9abf82b8b ceph: fix locking... |
2566 |
int err = -ENOMEM; |
93cea5beb ceph: use ceph_pa... |
2567 |
struct ceph_pagelist *pagelist; |
20cb34ae9 ceph: support v2 ... |
2568 |
struct ceph_reconnect_state recon_state; |
2f2dc0534 ceph: MDS client |
2569 |
|
34b6c855f ceph: clean up se... |
2570 2571 |
pr_info("mds%d reconnect start ", mds); |
2f2dc0534 ceph: MDS client |
2572 |
|
93cea5beb ceph: use ceph_pa... |
2573 2574 2575 2576 |
pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); if (!pagelist) goto fail_nopagelist; ceph_pagelist_init(pagelist); |
b61c27636 libceph: don't co... |
2577 |
reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); |
a79832f26 ceph: make ceph_m... |
2578 |
if (!reply) |
93cea5beb ceph: use ceph_pa... |
2579 |
goto fail_nomsg; |
93cea5beb ceph: use ceph_pa... |
2580 |
|
34b6c855f ceph: clean up se... |
2581 2582 2583 |
mutex_lock(&session->s_mutex); session->s_state = CEPH_MDS_SESSION_RECONNECTING; session->s_seq = 0; |
2f2dc0534 ceph: MDS client |
2584 |
|
34b6c855f ceph: clean up se... |
2585 2586 |
ceph_con_open(&session->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); |
2f2dc0534 ceph: MDS client |
2587 |
|
34b6c855f ceph: clean up se... |
2588 2589 |
/* replay unsafe requests */ replay_unsafe_requests(mdsc, session); |
2f2dc0534 ceph: MDS client |
2590 2591 |
down_read(&mdsc->snap_rwsem); |
2f2dc0534 ceph: MDS client |
2592 2593 2594 |
dout("session %p state %s ", session, session_state_name(session->s_state)); |
e01a59464 ceph: dicard cap ... |
2595 2596 |
/* drop old cap expires; we're about to reestablish that state */ discard_cap_releases(mdsc, session); |
2f2dc0534 ceph: MDS client |
2597 |
/* traverse this session's caps */ |
93cea5beb ceph: use ceph_pa... |
2598 2599 2600 |
err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps); if (err) goto fail; |
20cb34ae9 ceph: support v2 ... |
2601 2602 2603 2604 |
recon_state.pagelist = pagelist; recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; err = iterate_session_caps(session, encode_caps_cb, &recon_state); |
2f2dc0534 ceph: MDS client |
2605 |
if (err < 0) |
9abf82b8b ceph: fix locking... |
2606 |
goto fail; |
2f2dc0534 ceph: MDS client |
2607 2608 2609 2610 2611 2612 |
/* * snaprealms. we provide mds with the ino, seq (version), and * parent for all of our realms. If the mds has any newer info, * it will tell us. */ |
a105f00cf ceph: use rbtree ... |
2613 2614 2615 |
for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { struct ceph_snap_realm *realm = rb_entry(p, struct ceph_snap_realm, node); |
93cea5beb ceph: use ceph_pa... |
2616 |
struct ceph_mds_snaprealm_reconnect sr_rec; |
2f2dc0534 ceph: MDS client |
2617 2618 2619 2620 |
dout(" adding snap realm %llx seq %lld parent %llx ", realm->ino, realm->seq, realm->parent_ino); |
93cea5beb ceph: use ceph_pa... |
2621 2622 2623 2624 2625 2626 |
sr_rec.ino = cpu_to_le64(realm->ino); sr_rec.seq = cpu_to_le64(realm->seq); sr_rec.parent = cpu_to_le64(realm->parent_ino); err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); if (err) goto fail; |
2f2dc0534 ceph: MDS client |
2627 |
} |
2f2dc0534 ceph: MDS client |
2628 |
|
93cea5beb ceph: use ceph_pa... |
2629 |
reply->pagelist = pagelist; |
20cb34ae9 ceph: support v2 ... |
2630 2631 |
if (recon_state.flock) reply->hdr.version = cpu_to_le16(2); |
93cea5beb ceph: use ceph_pa... |
2632 2633 |
reply->hdr.data_len = cpu_to_le32(pagelist->length); reply->nr_pages = calc_pages_for(0, pagelist->length); |
2f2dc0534 ceph: MDS client |
2634 |
ceph_con_send(&session->s_con, reply); |
9abf82b8b ceph: fix locking... |
2635 2636 2637 2638 2639 |
mutex_unlock(&session->s_mutex); mutex_lock(&mdsc->mutex); __wake_requests(mdsc, &session->s_waiting); mutex_unlock(&mdsc->mutex); |
2f2dc0534 ceph: MDS client |
2640 |
up_read(&mdsc->snap_rwsem); |
2f2dc0534 ceph: MDS client |
2641 |
return; |
93cea5beb ceph: use ceph_pa... |
2642 |
fail: |
2f2dc0534 ceph: MDS client |
2643 |
ceph_msg_put(reply); |
9abf82b8b ceph: fix locking... |
2644 2645 |
up_read(&mdsc->snap_rwsem); mutex_unlock(&session->s_mutex); |
93cea5beb ceph: use ceph_pa... |
2646 2647 2648 2649 |
fail_nomsg: ceph_pagelist_release(pagelist); kfree(pagelist); fail_nopagelist: |
9abf82b8b ceph: fix locking... |
2650 2651 |
pr_err("error %d preparing reconnect for mds%d ", err, mds); |
9abf82b8b ceph: fix locking... |
2652 |
return; |
2f2dc0534 ceph: MDS client |
2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 |
} /* * compare old and new mdsmaps, kicking requests * and closing out old connections as necessary * * called under mdsc->mutex. */ static void check_new_map(struct ceph_mds_client *mdsc, struct ceph_mdsmap *newmap, struct ceph_mdsmap *oldmap) { int i; int oldstate, newstate; struct ceph_mds_session *s; dout("check_new_map new %u old %u ", newmap->m_epoch, oldmap->m_epoch); for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { if (mdsc->sessions[i] == NULL) continue; s = mdsc->sessions[i]; oldstate = ceph_mdsmap_get_state(oldmap, i); newstate = ceph_mdsmap_get_state(newmap, i); |
0deb01c99 ceph: track laggy... |
2680 2681 |
dout("check_new_map mds%d state %s%s -> %s%s (session %s) ", |
2f2dc0534 ceph: MDS client |
2682 |
i, ceph_mds_state_name(oldstate), |
0deb01c99 ceph: track laggy... |
2683 |
ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", |
2f2dc0534 ceph: MDS client |
2684 |
ceph_mds_state_name(newstate), |
0deb01c99 ceph: track laggy... |
2685 |
ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", |
2f2dc0534 ceph: MDS client |
2686 2687 2688 2689 2690 2691 2692 2693 2694 |
session_state_name(s->s_state)); if (memcmp(ceph_mdsmap_get_addr(oldmap, i), ceph_mdsmap_get_addr(newmap, i), sizeof(struct ceph_entity_addr))) { if (s->s_state == CEPH_MDS_SESSION_OPENING) { /* the session never opened, just close it * out now */ __wake_requests(mdsc, &s->s_waiting); |
2600d2dd5 ceph: drop messag... |
2695 |
__unregister_session(mdsc, s); |
2f2dc0534 ceph: MDS client |
2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 |
} else { /* just close it */ mutex_unlock(&mdsc->mutex); mutex_lock(&s->s_mutex); mutex_lock(&mdsc->mutex); ceph_con_close(&s->s_con); mutex_unlock(&s->s_mutex); s->s_state = CEPH_MDS_SESSION_RESTARTING; } /* kick any requests waiting on the recovering mds */ |
29790f26a ceph: wait for md... |
2707 |
kick_requests(mdsc, i); |
2f2dc0534 ceph: MDS client |
2708 2709 2710 2711 2712 2713 2714 2715 |
} else if (oldstate == newstate) { continue; /* nothing new with this mds */ } /* * send reconnect? */ if (s->s_state == CEPH_MDS_SESSION_RESTARTING && |
34b6c855f ceph: clean up se... |
2716 2717 2718 2719 2720 |
newstate >= CEPH_MDS_STATE_RECONNECT) { mutex_unlock(&mdsc->mutex); send_mds_reconnect(mdsc, s); mutex_lock(&mdsc->mutex); } |
2f2dc0534 ceph: MDS client |
2721 2722 |
/* |
29790f26a ceph: wait for md... |
2723 |
* kick request on any mds that has gone active. |
2f2dc0534 ceph: MDS client |
2724 2725 2726 |
*/ if (oldstate < CEPH_MDS_STATE_ACTIVE && newstate >= CEPH_MDS_STATE_ACTIVE) { |
29790f26a ceph: wait for md... |
2727 2728 2729 2730 2731 |
if (oldstate != CEPH_MDS_STATE_CREATING && oldstate != CEPH_MDS_STATE_STARTING) pr_info("mds%d recovery completed ", s->s_mds); kick_requests(mdsc, i); |
2f2dc0534 ceph: MDS client |
2732 |
ceph_kick_flushing_caps(mdsc, s); |
0dc2570fa ceph: reset reque... |
2733 |
wake_up_session_caps(s, 1); |
2f2dc0534 ceph: MDS client |
2734 2735 |
} } |
cb170a221 ceph: connect to ... |
2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 |
for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { s = mdsc->sessions[i]; if (!s) continue; if (!ceph_mdsmap_is_laggy(newmap, i)) continue; if (s->s_state == CEPH_MDS_SESSION_OPEN || s->s_state == CEPH_MDS_SESSION_HUNG || s->s_state == CEPH_MDS_SESSION_CLOSING) { dout(" connecting to export targets of laggy mds%d ", i); __open_export_target_sessions(mdsc, s); } } |
2f2dc0534 ceph: MDS client |
2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 |
} /* * leases */ /* * caller must hold session s_mutex, dentry->d_lock */ void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) { struct ceph_dentry_info *di = ceph_dentry(dentry); ceph_put_mds_session(di->lease_session); di->lease_session = NULL; } |
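/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller honouring the locking contract documented above before dropping
 * a dentry lease.  The helper name is an assumption; the lock calls match
 * the mutex_lock()/spin_lock() usage found elsewhere in this file.
 */
#if 0	/* example only, never compiled */
static void example_drop_dentry_lease(struct ceph_mds_session *session,
				      struct dentry *dentry)
{
	mutex_lock(&session->s_mutex);
	spin_lock(&dentry->d_lock);
	__ceph_mdsc_drop_dentry_lease(dentry);	/* puts di->lease_session */
	spin_unlock(&dentry->d_lock);
	mutex_unlock(&session->s_mutex);
}
#endif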
2600d2dd5 ceph: drop messag... |
2770 2771 2772 |
static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg) |
2f2dc0534 ceph: MDS client |
2773 |
{ |
3d14c5d2b ceph: factor out ... |
2774 |
struct super_block *sb = mdsc->fsc->sb; |
2f2dc0534 ceph: MDS client |
2775 |
struct inode *inode; |
2f2dc0534 ceph: MDS client |
2776 2777 |
struct dentry *parent, *dentry; struct ceph_dentry_info *di; |
2600d2dd5 ceph: drop messag... |
2778 |
int mds = session->s_mds; |
2f2dc0534 ceph: MDS client |
2779 |
struct ceph_mds_lease *h = msg->front.iov_base; |
1e5ea23df ceph: fix lease r... |
2780 |
u32 seq; |
2f2dc0534 ceph: MDS client |
2781 |
struct ceph_vino vino; |
2f2dc0534 ceph: MDS client |
2782 2783 |
struct qstr dname; int release = 0; |
2f2dc0534 ceph: MDS client |
2784 2785 2786 2787 2788 2789 2790 2791 |
dout("handle_lease from mds%d ", mds); /* decode */ if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) goto bad; vino.ino = le64_to_cpu(h->ino); vino.snap = CEPH_NOSNAP; |
1e5ea23df ceph: fix lease r... |
2792 |
seq = le32_to_cpu(h->seq); |
2f2dc0534 ceph: MDS client |
2793 2794 2795 2796 |
dname.name = (void *)h + sizeof(*h) + sizeof(u32); dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); if (dname.len != get_unaligned_le32(h+1)) goto bad; |
2f2dc0534 ceph: MDS client |
2797 2798 2799 2800 2801 |
mutex_lock(&session->s_mutex); session->s_seq++; /* lookup inode */ inode = ceph_find_inode(sb, vino); |
2f90b852e ceph: ignore leas... |
2802 2803 2804 |
dout("handle_lease %s, ino %llx %p %.*s ", ceph_lease_op_name(h->action), vino.ino, inode, |
1e5ea23df ceph: fix lease r... |
2805 |
dname.len, dname.name); |
2f2dc0534 ceph: MDS client |
2806 2807 2808 2809 2810 |
if (inode == NULL) { dout("handle_lease no inode %llx ", vino.ino); goto release; } |
2f2dc0534 ceph: MDS client |
2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 |
/* dentry */ parent = d_find_alias(inode); if (!parent) { dout("no parent dentry on inode %p ", inode); WARN_ON(1); goto release; /* hrm... */ } dname.hash = full_name_hash(dname.name, dname.len); dentry = d_lookup(parent, &dname); dput(parent); if (!dentry) goto release; spin_lock(&dentry->d_lock); di = ceph_dentry(dentry); switch (h->action) { case CEPH_MDS_LEASE_REVOKE: |
3d8eb7a94 ceph: remove unne... |
2830 |
if (di->lease_session == session) { |
1e5ea23df ceph: fix lease r... |
2831 2832 |
if (ceph_seq_cmp(di->lease_seq, seq) > 0) h->seq = cpu_to_le32(di->lease_seq); |
2f2dc0534 ceph: MDS client |
2833 2834 2835 2836 2837 2838 |
__ceph_mdsc_drop_dentry_lease(dentry); } release = 1; break; case CEPH_MDS_LEASE_RENEW: |
3d8eb7a94 ceph: remove unne... |
2839 |
if (di->lease_session == session && |
2f2dc0534 ceph: MDS client |
2840 2841 2842 2843 2844 |
di->lease_gen == session->s_cap_gen && di->lease_renew_from && di->lease_renew_after == 0) { unsigned long duration = le32_to_cpu(h->duration_ms) * HZ / 1000; |
1e5ea23df ceph: fix lease r... |
2845 |
di->lease_seq = seq; |
2f2dc0534 ceph: MDS client |
2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 |
dentry->d_time = di->lease_renew_from + duration; di->lease_renew_after = di->lease_renew_from + (duration >> 1); di->lease_renew_from = 0; } break; } spin_unlock(&dentry->d_lock); dput(dentry); if (!release) goto out; release: /* let's just reuse the same message */ h->action = CEPH_MDS_LEASE_REVOKE_ACK; ceph_msg_get(msg); ceph_con_send(&session->s_con, msg); out: iput(inode); mutex_unlock(&session->s_mutex); |
2f2dc0534 ceph: MDS client |
2868 2869 2870 2871 2872 |
return; bad: pr_err("corrupt lease message "); |
9ec7cab14 ceph: hex dump co... |
2873 |
ceph_msg_dump(msg); |
2f2dc0534 ceph: MDS client |
2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 |
} void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, struct inode *inode, struct dentry *dentry, char action, u32 seq) { struct ceph_msg *msg; struct ceph_mds_lease *lease; int len = sizeof(*lease) + sizeof(u32); int dnamelen = 0; dout("lease_send_msg inode %p dentry %p %s to mds%d ", inode, dentry, ceph_lease_op_name(action), session->s_mds); dnamelen = dentry->d_name.len; len += dnamelen; |
b61c27636 libceph: don't co... |
2891 |
msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); |
a79832f26 ceph: make ceph_m... |
2892 |
if (!msg) |
2f2dc0534 ceph: MDS client |
2893 2894 2895 |
return; lease = msg->front.iov_base; lease->action = action; |
2f2dc0534 ceph: MDS client |
2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 |
lease->ino = cpu_to_le64(ceph_vino(inode).ino); lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); lease->seq = cpu_to_le32(seq); put_unaligned_le32(dnamelen, lease + 1); memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); /* * if this is a preemptive lease RELEASE, no need to * flush request stream, since the actual request will * soon follow. */ msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); ceph_con_send(&session->s_con, msg); } /* * Preemptively release a lease we expect to invalidate anyway. * Pass @inode always, @dentry is optional. */ void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, |
2f90b852e ceph: ignore leas... |
2917 |
struct dentry *dentry) |
2f2dc0534 ceph: MDS client |
2918 2919 2920 2921 2922 2923 2924 |
{ struct ceph_dentry_info *di; struct ceph_mds_session *session; u32 seq; BUG_ON(inode == NULL); BUG_ON(dentry == NULL); |
2f2dc0534 ceph: MDS client |
2925 2926 2927 2928 2929 2930 2931 2932 2933 |
/* is dentry lease valid? */ spin_lock(&dentry->d_lock); di = ceph_dentry(dentry); if (!di || !di->lease_session || di->lease_session->s_mds < 0 || di->lease_gen != di->lease_session->s_cap_gen || !time_before(jiffies, dentry->d_time)) { dout("lease_release inode %p dentry %p -- " |
2f90b852e ceph: ignore leas... |
2934 2935 2936 |
"no lease ", inode, dentry); |
2f2dc0534 ceph: MDS client |
2937 2938 2939 2940 2941 2942 2943 2944 2945 |
spin_unlock(&dentry->d_lock); return; } /* we do have a lease on this dentry; note mds and seq */ session = ceph_get_mds_session(di->lease_session); seq = di->lease_seq; __ceph_mdsc_drop_dentry_lease(dentry); spin_unlock(&dentry->d_lock); |
2f90b852e ceph: ignore leas... |
2946 2947 2948 |
dout("lease_release inode %p dentry %p to mds%d ", inode, dentry, session->s_mds); |
2f2dc0534 ceph: MDS client |
2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 |
ceph_mdsc_lease_send_msg(session, inode, dentry, CEPH_MDS_LEASE_RELEASE, seq); ceph_put_mds_session(session); } /* * drop all leases (and dentry refs) in preparation for umount */ static void drop_leases(struct ceph_mds_client *mdsc) { int i; dout("drop_leases "); mutex_lock(&mdsc->mutex); for (i = 0; i < mdsc->max_sessions; i++) { struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); if (!s) continue; mutex_unlock(&mdsc->mutex); mutex_lock(&s->s_mutex); mutex_unlock(&s->s_mutex); ceph_put_mds_session(s); mutex_lock(&mdsc->mutex); } mutex_unlock(&mdsc->mutex); } /* * delayed work -- periodically trim expired leases, renew caps with mds */ static void schedule_delayed(struct ceph_mds_client *mdsc) { int delay = 5; unsigned hz = round_jiffies_relative(HZ * delay); schedule_delayed_work(&mdsc->delayed_work, hz); } static void delayed_work(struct work_struct *work) { int i; struct ceph_mds_client *mdsc = container_of(work, struct ceph_mds_client, delayed_work.work); int renew_interval; int renew_caps; dout("mdsc delayed_work "); |
afcdaea3f ceph: flush dirty... |
2999 |
ceph_check_delayed_caps(mdsc); |
2f2dc0534 ceph: MDS client |
3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 |
mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; renew_caps = time_after_eq(jiffies, HZ*renew_interval + mdsc->last_renew_caps); if (renew_caps) mdsc->last_renew_caps = jiffies; for (i = 0; i < mdsc->max_sessions; i++) { struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); if (s == NULL) continue; if (s->s_state == CEPH_MDS_SESSION_CLOSING) { dout("resending session close request for mds%d ", s->s_mds); request_close_session(mdsc, s); ceph_put_mds_session(s); continue; } if (s->s_ttl && time_after(jiffies, s->s_ttl)) { if (s->s_state == CEPH_MDS_SESSION_OPEN) { s->s_state = CEPH_MDS_SESSION_HUNG; pr_info("mds%d hung ", s->s_mds); } } if (s->s_state < CEPH_MDS_SESSION_OPEN) { /* this mds is failed or recovering, just wait */ ceph_put_mds_session(s); continue; } mutex_unlock(&mdsc->mutex); mutex_lock(&s->s_mutex); if (renew_caps) send_renew_caps(mdsc, s); else ceph_con_keepalive(&s->s_con); |
ee6b272b9 ceph: drop unused... |
3039 |
ceph_add_cap_releases(mdsc, s); |
aab53dd9e ceph: only send c... |
3040 3041 |
if (s->s_state == CEPH_MDS_SESSION_OPEN || s->s_state == CEPH_MDS_SESSION_HUNG) |
3d7ded4d8 ceph: release cap... |
3042 |
ceph_send_cap_releases(mdsc, s); |
2f2dc0534 ceph: MDS client |
3043 3044 3045 3046 3047 3048 3049 3050 3051 |
mutex_unlock(&s->s_mutex); ceph_put_mds_session(s); mutex_lock(&mdsc->mutex); } mutex_unlock(&mdsc->mutex); schedule_delayed(mdsc); } |
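/*
 * Illustrative note (not part of the original file): schedule_delayed()
 * re-arms the work roughly every 5 seconds, while delayed_work() only
 * renews caps once (m_session_timeout >> 2) seconds have elapsed; with a
 * 60 second session timeout that is about every 15 seconds.  The helper
 * below is hypothetical and merely restates that arithmetic.
 */
#if 0	/* example only, never compiled */
static unsigned long example_renew_interval(int session_timeout_sec)
{
	return HZ * (session_timeout_sec >> 2);	/* as in delayed_work() */
}
#endif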
3d14c5d2b ceph: factor out ... |
3052 |
int ceph_mdsc_init(struct ceph_fs_client *fsc) |
2f2dc0534 ceph: MDS client |
3053 |
|
2f2dc0534 ceph: MDS client |
3054 |
{ |
3d14c5d2b ceph: factor out ... |
3055 3056 3057 3058 3059 3060 3061 |
struct ceph_mds_client *mdsc; mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); if (!mdsc) return -ENOMEM; mdsc->fsc = fsc; fsc->mdsc = mdsc; |
2f2dc0534 ceph: MDS client |
3062 3063 |
mutex_init(&mdsc->mutex); mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); |
2d06eeb87 ceph: handle kzal... |
3064 3065 |
if (mdsc->mdsmap == NULL) return -ENOMEM; |
2f2dc0534 ceph: MDS client |
3066 |
init_completion(&mdsc->safe_umount_waiters); |
f3c60c591 ceph: fix multipl... |
3067 |
init_waitqueue_head(&mdsc->session_close_wq); |
2f2dc0534 ceph: MDS client |
3068 3069 3070 3071 3072 |
INIT_LIST_HEAD(&mdsc->waiting_for_map); mdsc->sessions = NULL; mdsc->max_sessions = 0; mdsc->stopping = 0; init_rwsem(&mdsc->snap_rwsem); |
a105f00cf ceph: use rbtree ... |
3073 |
mdsc->snap_realms = RB_ROOT; |
2f2dc0534 ceph: MDS client |
3074 3075 3076 |
INIT_LIST_HEAD(&mdsc->snap_empty); spin_lock_init(&mdsc->snap_empty_lock); mdsc->last_tid = 0; |
44ca18f26 ceph: use rbtree ... |
3077 |
mdsc->request_tree = RB_ROOT; |
2f2dc0534 ceph: MDS client |
3078 3079 3080 3081 3082 3083 3084 3085 |
INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); mdsc->last_renew_caps = jiffies; INIT_LIST_HEAD(&mdsc->cap_delay_list); spin_lock_init(&mdsc->cap_delay_lock); INIT_LIST_HEAD(&mdsc->snap_flush_list); spin_lock_init(&mdsc->snap_flush_lock); mdsc->cap_flush_seq = 0; INIT_LIST_HEAD(&mdsc->cap_dirty); |
db3540522 ceph: fix cap flu... |
3086 |
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); |
2f2dc0534 ceph: MDS client |
3087 3088 3089 3090 3091 |
mdsc->num_cap_flushing = 0; spin_lock_init(&mdsc->cap_dirty_lock); init_waitqueue_head(&mdsc->cap_flushing_wq); spin_lock_init(&mdsc->dentry_lru_lock); INIT_LIST_HEAD(&mdsc->dentry_lru); |
2d06eeb87 ceph: handle kzal... |
3092 |
|
37151668b ceph: do caps acc... |
3093 |
ceph_caps_init(mdsc); |
3d14c5d2b ceph: factor out ... |
3094 |
ceph_adjust_min_caps(mdsc, fsc->min_caps); |
37151668b ceph: do caps acc... |
3095 |
|
5f44f1426 ceph: handle erro... |
3096 |
return 0; |
2f2dc0534 ceph: MDS client |
3097 3098 3099 3100 3101 3102 3103 3104 3105 |
} /* * Wait for safe replies on open mds requests. If we time out, drop * all requests from the tree to avoid dangling dentry refs. */ static void wait_requests(struct ceph_mds_client *mdsc) { struct ceph_mds_request *req; |
3d14c5d2b ceph: factor out ... |
3106 |
struct ceph_fs_client *fsc = mdsc->fsc; |
2f2dc0534 ceph: MDS client |
3107 3108 |
mutex_lock(&mdsc->mutex); |
44ca18f26 ceph: use rbtree ... |
3109 |
if (__get_oldest_req(mdsc)) { |
2f2dc0534 ceph: MDS client |
3110 |
mutex_unlock(&mdsc->mutex); |
44ca18f26 ceph: use rbtree ... |
3111 |
|
2f2dc0534 ceph: MDS client |
3112 3113 3114 |
dout("wait_requests waiting for requests "); wait_for_completion_timeout(&mdsc->safe_umount_waiters, |
3d14c5d2b ceph: factor out ... |
3115 |
fsc->client->options->mount_timeout * HZ); |
2f2dc0534 ceph: MDS client |
3116 3117 |
/* tear down remaining requests */ |
44ca18f26 ceph: use rbtree ... |
3118 3119 |
mutex_lock(&mdsc->mutex); while ((req = __get_oldest_req(mdsc))) { |
2f2dc0534 ceph: MDS client |
3120 3121 3122 |
dout("wait_requests timed out on tid %llu ", req->r_tid); |
44ca18f26 ceph: use rbtree ... |
3123 |
__unregister_request(mdsc, req); |
2f2dc0534 ceph: MDS client |
3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 |
} } mutex_unlock(&mdsc->mutex); dout("wait_requests done "); } /* * called before mount is ro, and before dentries are torn down. * (hmm, does this still race with new lookups?) */ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount "); mdsc->stopping = 1; drop_leases(mdsc); |
afcdaea3f ceph: flush dirty... |
3142 |
ceph_flush_dirty_caps(mdsc); |
2f2dc0534 ceph: MDS client |
3143 |
wait_requests(mdsc); |
17c688c3d ceph: delay umoun... |
3144 3145 3146 3147 3148 3149 |
/* * wait for reply handlers to drop their request refs and * their inode/dcache refs */ ceph_msgr_flush(); |
2f2dc0534 ceph: MDS client |
3150 3151 3152 3153 3154 3155 3156 |
} /* * wait for all write mds requests to flush. */ static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) { |
80fc7314a ceph: fix mds syn... |
3157 |
struct ceph_mds_request *req = NULL, *nextreq; |
44ca18f26 ceph: use rbtree ... |
3158 |
struct rb_node *n; |
2f2dc0534 ceph: MDS client |
3159 3160 3161 3162 |
mutex_lock(&mdsc->mutex); dout("wait_unsafe_requests want %lld ", want_tid); |
80fc7314a ceph: fix mds syn... |
3163 |
restart: |
44ca18f26 ceph: use rbtree ... |
3164 3165 |
req = __get_oldest_req(mdsc); while (req && req->r_tid <= want_tid) { |
80fc7314a ceph: fix mds syn... |
3166 3167 3168 3169 3170 3171 |
/* find next request */ n = rb_next(&req->r_node); if (n) nextreq = rb_entry(n, struct ceph_mds_request, r_node); else nextreq = NULL; |
44ca18f26 ceph: use rbtree ... |
3172 3173 3174 |
if ((req->r_op & CEPH_MDS_OP_WRITE)) { /* write op */ ceph_mdsc_get_request(req); |
80fc7314a ceph: fix mds syn... |
3175 3176 |
if (nextreq) ceph_mdsc_get_request(nextreq); |
44ca18f26 ceph: use rbtree ... |
3177 3178 3179 3180 3181 3182 |
mutex_unlock(&mdsc->mutex); dout("wait_unsafe_requests wait on %llu (want %llu) ", req->r_tid, want_tid); wait_for_completion(&req->r_safe_completion); mutex_lock(&mdsc->mutex); |
44ca18f26 ceph: use rbtree ... |
3183 |
ceph_mdsc_put_request(req); |
80fc7314a ceph: fix mds syn... |
3184 3185 3186 3187 3188 3189 3190 3191 |
if (!nextreq)
break; /* next dne before, so we're done! */
if (RB_EMPTY_NODE(&nextreq->r_node)) {
/* next request was removed from tree */
ceph_mdsc_put_request(nextreq);
goto restart;
}
ceph_mdsc_put_request(nextreq); /* won't go away */ |
44ca18f26 ceph: use rbtree ... |
3192 |
} |
80fc7314a ceph: fix mds syn... |
3193 |
req = nextreq; |
2f2dc0534 ceph: MDS client |
3194 3195 3196 3197 3198 3199 3200 3201 3202 |
} mutex_unlock(&mdsc->mutex); dout("wait_unsafe_requests done "); } void ceph_mdsc_sync(struct ceph_mds_client *mdsc) { u64 want_tid, want_flush; |
3d14c5d2b ceph: factor out ... |
3203 |
if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) |
56b7cf958 ceph: skip mds sy... |
3204 |
return; |
2f2dc0534 ceph: MDS client |
3205 3206 3207 3208 3209 3210 3211 3212 |
dout("sync "); mutex_lock(&mdsc->mutex); want_tid = mdsc->last_tid; want_flush = mdsc->cap_flush_seq; mutex_unlock(&mdsc->mutex); dout("sync want tid %lld flush_seq %lld ", want_tid, want_flush); |
afcdaea3f ceph: flush dirty... |
3213 |
ceph_flush_dirty_caps(mdsc); |
2f2dc0534 ceph: MDS client |
3214 3215 3216 3217 |
wait_unsafe_requests(mdsc, want_tid);
wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
} |
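The traversal in wait_unsafe_requests() above relies on a common pattern: take a reference on the successor before dropping mdsc->mutex, and restart the walk if that successor was unregistered while we slept. The following is a minimal user-space sketch of the same idea, not part of this file; struct item, item_get/item_put and wait_on_item are hypothetical stand-ins for the request tree, ceph_mdsc_get_request/ceph_mdsc_put_request and wait_for_completion.

	#include <pthread.h>
	#include <stdbool.h>

	struct item {
		struct item *next;
		int refs;
		bool unlinked;			/* analogous to RB_EMPTY_NODE() */
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct item *head;

	static void item_get(struct item *it) { it->refs++; }	/* called under list_lock */
	static void item_put(struct item *it) { it->refs--; }	/* real code would free at zero */
	static void wait_on_item(struct item *it) { (void)it; }	/* stands in for wait_for_completion() */

	void wait_all(void)
	{
		struct item *it, *next;

		pthread_mutex_lock(&list_lock);
	restart:
		it = head;
		while (it) {
			next = it->next;
			item_get(it);
			if (next)
				item_get(next);		/* pin the successor before unlocking */
			pthread_mutex_unlock(&list_lock);
			wait_on_item(it);		/* may sleep; the list can change here */
			pthread_mutex_lock(&list_lock);
			item_put(it);
			if (!next)
				break;
			if (next->unlinked) {		/* successor removed while we slept */
				item_put(next);
				goto restart;
			}
			item_put(next);			/* still linked, safe to step to it */
			it = next;
		}
		pthread_mutex_unlock(&list_lock);
	}

The reference taken on the successor is what makes it safe to examine it after the lock is reacquired, even if another thread unregistered it in the meantime.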
f3c60c591 ceph: fix multipl... |
3218 3219 3220 |
/*
 * true if all sessions are closed, or we force unmount
 */ |
7fd7d101f ceph/mds_client.c... |
3221 |
static bool done_closing_sessions(struct ceph_mds_client *mdsc) |
f3c60c591 ceph: fix multipl... |
3222 3223 |
{
int i, n = 0; |
3d14c5d2b ceph: factor out ... |
3224 |
if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) |
f3c60c591 ceph: fix multipl... |
3225 3226 3227 3228 3229 3230 3231 3232 3233 |
return true;
mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++)
if (mdsc->sessions[i])
n++;
mutex_unlock(&mdsc->mutex);
return n == 0;
} |
2f2dc0534 ceph: MDS client |
3234 3235 3236 3237 3238 3239 3240 3241 |
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
struct ceph_mds_session *session;
int i; |
3d14c5d2b ceph: factor out ... |
3242 3243 |
struct ceph_fs_client *fsc = mdsc->fsc;
unsigned long timeout = fsc->client->options->mount_timeout * HZ; |
2f2dc0534 ceph: MDS client |
3244 3245 3246 |
dout("close_sessions "); |
2f2dc0534 ceph: MDS client |
3247 |
/* close sessions */ |
f3c60c591 ceph: fix multipl... |
3248 3249 3250 3251 3252 |
mutex_lock(&mdsc->mutex);
for (i = 0; i < mdsc->max_sessions; i++) {
session = __ceph_lookup_mds_session(mdsc, i);
if (!session)
continue; |
2f2dc0534 ceph: MDS client |
3253 |
mutex_unlock(&mdsc->mutex); |
f3c60c591 ceph: fix multipl... |
3254 3255 3256 3257 |
mutex_lock(&session->s_mutex);
__close_session(mdsc, session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session); |
2f2dc0534 ceph: MDS client |
3258 3259 |
mutex_lock(&mdsc->mutex);
} |
f3c60c591 ceph: fix multipl... |
3260 3261 3262 3263 3264 3265 |
mutex_unlock(&mdsc->mutex); dout("waiting for sessions to close "); wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc), timeout); |
2f2dc0534 ceph: MDS client |
3266 3267 |
/* tear down remaining sessions */ |
f3c60c591 ceph: fix multipl... |
3268 |
mutex_lock(&mdsc->mutex); |
2f2dc0534 ceph: MDS client |
3269 3270 3271 |
for (i = 0; i < mdsc->max_sessions; i++) {
if (mdsc->sessions[i]) {
session = get_session(mdsc->sessions[i]); |
2600d2dd5 ceph: drop messag... |
3272 |
__unregister_session(mdsc, session); |
2f2dc0534 ceph: MDS client |
3273 3274 3275 3276 3277 3278 3279 3280 |
mutex_unlock(&mdsc->mutex);
mutex_lock(&session->s_mutex);
remove_session_caps(session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
mutex_lock(&mdsc->mutex);
}
} |
2f2dc0534 ceph: MDS client |
3281 |
WARN_ON(!list_empty(&mdsc->cap_delay_list)); |
2f2dc0534 ceph: MDS client |
3282 3283 3284 3285 3286 3287 3288 3289 3290 |
mutex_unlock(&mdsc->mutex);

ceph_cleanup_empty_realms(mdsc);

cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

dout("stopped\n");
} |
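The close path above is a bounded two-phase shutdown: ask every session to close, wait up to mount_timeout for done_closing_sessions() to become true, then forcibly unregister whatever is left. A rough user-space analogue of that bounded wait (hypothetical names: open_sessions, closed_cond; it assumes sessions signal the condition variable as they close):

	#include <errno.h>
	#include <pthread.h>
	#include <stdbool.h>
	#include <time.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t closed_cond = PTHREAD_COND_INITIALIZER;
	static int open_sessions;		/* decremented as each session closes */

	static bool done_closing(void)		/* caller holds lock */
	{
		return open_sessions == 0;
	}

	void wait_for_sessions(unsigned int timeout_sec)
	{
		struct timespec deadline;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += timeout_sec;

		pthread_mutex_lock(&lock);
		while (!done_closing()) {
			/* ETIMEDOUT: stop waiting and fall through to forced teardown */
			if (pthread_cond_timedwait(&closed_cond, &lock, &deadline) == ETIMEDOUT)
				break;
		}
		/* forced teardown of any remaining sessions would go here */
		pthread_mutex_unlock(&lock);
	}

As in close_sessions above, the timeout only bounds how long we wait politely; the forced teardown still runs either way.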
3d14c5d2b ceph: factor out ... |
3291 |
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
2f2dc0534 ceph: MDS client |
3292 3293 3294 3295 3296 3297 3298 |
{ dout("stop "); cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); |
37151668b ceph: do caps acc... |
3299 |
ceph_caps_finalize(mdsc); |
2f2dc0534 ceph: MDS client |
3300 |
} |
3d14c5d2b ceph: factor out ... |
3301 3302 3303 |
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc = fsc->mdsc; |
ef550f6f4 ceph: flush msgr_... |
3304 3305 |
dout("mdsc_destroy %p ", mdsc); |
3d14c5d2b ceph: factor out ... |
3306 |
ceph_mdsc_stop(mdsc); |
ef550f6f4 ceph: flush msgr_... |
3307 3308 3309 |
/* flush out any connection work with references to us */
ceph_msgr_flush(); |
3d14c5d2b ceph: factor out ... |
3310 3311 |
fsc->mdsc = NULL;
kfree(mdsc); |
ef550f6f4 ceph: flush msgr_... |
3312 3313 |
dout("mdsc_destroy %p done ", mdsc); |
3d14c5d2b ceph: factor out ... |
3314 |
} |
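The order in ceph_mdsc_destroy() above matters: the client is stopped first, then ceph_msgr_flush() drains any messenger work that may still hold references to the mdsc, and only after that is the structure freed. Freeing before the flush would let queued connection callbacks touch memory that has already been released.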
2f2dc0534 ceph: MDS client |
3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 |
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
u32 epoch;
u32 maplen;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mdsmap *newmap, *oldmap;
struct ceph_fsid fsid;
int err = -EINVAL;

ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
ceph_decode_copy(&p, &fsid, sizeof(fsid)); |
3d14c5d2b ceph: factor out ... |
3331 |
if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) |
0743304d8 ceph: fix debugfs... |
3332 |
return; |
c89136ea4 ceph: convert enc... |
3333 3334 |
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p); |
2f2dc0534 ceph: MDS client |
3335 3336 3337 3338 |
dout("handle_map epoch %u len %d ", epoch, (int)maplen); /* do we need it? */ |
3d14c5d2b ceph: factor out ... |
3339 |
ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); |
2f2dc0534 ceph: MDS client |
3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 |
mutex_lock(&mdsc->mutex);
if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
dout("handle_map epoch %u <= our %u\n",
     epoch, mdsc->mdsmap->m_epoch);
mutex_unlock(&mdsc->mutex);
return;
}

newmap = ceph_mdsmap_decode(&p, end);
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad_unlock;
}

/* swap into place */
if (mdsc->mdsmap) {
oldmap = mdsc->mdsmap;
mdsc->mdsmap = newmap;
check_new_map(mdsc, newmap, oldmap);
ceph_mdsmap_destroy(oldmap);
} else {
mdsc->mdsmap = newmap; /* first mds map */
} |
3d14c5d2b ceph: factor out ... |
3364 |
mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; |
2f2dc0534 ceph: MDS client |
3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 |
__wake_requests(mdsc, &mdsc->waiting_for_map);

mutex_unlock(&mdsc->mutex);
schedule_delayed(mdsc);
return;

bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
pr_err("error decoding mdsmap %d\n", err);
return;
}

static struct ceph_connection *con_get(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;

if (get_session(s)) { |
2600d2dd5 ceph: drop messag... |
3385 3386 |
dout("mdsc con_get %p ok (%d) ", s, atomic_read(&s->s_ref)); |
2f2dc0534 ceph: MDS client |
3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 |
return con;
}
dout("mdsc con_get %p FAIL\n", s);
return NULL;
}

static void con_put(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private; |
7d8e18a69 ceph: print debug... |
3397 3398 |
dout("mdsc con_put %p (%d) ", s, atomic_read(&s->s_ref) - 1); |
2f2dc0534 ceph: MDS client |
3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 |
ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private; |
7e70f0ed9 ceph: attempt mds... |
3409 |
struct ceph_mds_client *mdsc = s->s_mdsc; |
2f2dc0534 ceph: MDS client |
3410 |
|
7e70f0ed9 ceph: attempt mds... |
3411 3412 3413 |
pr_warning("mds%d closed our session ", s->s_mds); send_mds_reconnect(mdsc, s); |
2f2dc0534 ceph: MDS client |
3414 3415 3416 3417 3418 3419 3420 |
}

static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc;
int type = le16_to_cpu(msg->hdr.type); |
2600d2dd5 ceph: drop messag... |
3421 3422 3423 3424 3425 3426 |
mutex_lock(&mdsc->mutex);
if (__verify_registered_session(mdsc, s) < 0) {
mutex_unlock(&mdsc->mutex);
goto out;
}
mutex_unlock(&mdsc->mutex); |
2f2dc0534 ceph: MDS client |
3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 |
switch (type) {
case CEPH_MSG_MDS_MAP:
ceph_mdsc_handle_map(mdsc, msg);
break;
case CEPH_MSG_CLIENT_SESSION:
handle_session(s, msg);
break;
case CEPH_MSG_CLIENT_REPLY:
handle_reply(s, msg);
break;
case CEPH_MSG_CLIENT_REQUEST_FORWARD: |
2600d2dd5 ceph: drop messag... |
3438 |
handle_forward(mdsc, s, msg); |
2f2dc0534 ceph: MDS client |
3439 3440 3441 3442 3443 |
break;
case CEPH_MSG_CLIENT_CAPS:
ceph_handle_caps(s, msg);
break;
case CEPH_MSG_CLIENT_SNAP: |
2600d2dd5 ceph: drop messag... |
3444 |
ceph_handle_snap(mdsc, s, msg); |
2f2dc0534 ceph: MDS client |
3445 3446 |
break;
case CEPH_MSG_CLIENT_LEASE: |
2600d2dd5 ceph: drop messag... |
3447 |
handle_lease(mdsc, s, msg); |
2f2dc0534 ceph: MDS client |
3448 3449 3450 3451 3452 3453 3454 |
break; default: pr_err("received unknown message type %d %s ", type, ceph_msg_type_name(type)); } |
2600d2dd5 ceph: drop messag... |
3455 |
out: |
2f2dc0534 ceph: MDS client |
3456 3457 |
ceph_msg_put(msg);
} |
4e7a5dcd1 ceph: negotiate a... |
3458 3459 3460 3461 3462 3463 3464 3465 3466 |
/*
 * authentication
 */
static int get_authorizer(struct ceph_connection *con,
			  void **buf, int *len, int *proto,
			  void **reply_buf, int *reply_len, int force_new)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc; |
3d14c5d2b ceph: factor out ... |
3467 |
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
4e7a5dcd1 ceph: negotiate a... |
3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 |
int ret = 0;

if (force_new && s->s_authorizer) {
ac->ops->destroy_authorizer(ac, s->s_authorizer);
s->s_authorizer = NULL;
}
if (s->s_authorizer == NULL) {
if (ac->ops->create_authorizer) {
ret = ac->ops->create_authorizer(
	ac, CEPH_ENTITY_TYPE_MDS,
	&s->s_authorizer,
	&s->s_authorizer_buf,
	&s->s_authorizer_buf_len,
	&s->s_authorizer_reply_buf,
	&s->s_authorizer_reply_buf_len);
if (ret)
return ret;
}
}

*proto = ac->protocol;
*buf = s->s_authorizer_buf;
*len = s->s_authorizer_buf_len;
*reply_buf = s->s_authorizer_reply_buf;
*reply_len = s->s_authorizer_reply_buf_len;
return 0;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc; |
3d14c5d2b ceph: factor out ... |
3501 |
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
4e7a5dcd1 ceph: negotiate a... |
3502 3503 3504 |
return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
} |
9bd2e6f8b ceph: allow renew... |
3505 3506 3507 3508 |
static int invalidate_authorizer(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
struct ceph_mds_client *mdsc = s->s_mdsc; |
3d14c5d2b ceph: factor out ... |
3509 |
struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
9bd2e6f8b ceph: allow renew... |
3510 3511 3512 |
if (ac->ops->invalidate_authorizer)
ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
3d14c5d2b ceph: factor out ... |
3513 |
return ceph_monc_validate_auth(&mdsc->fsc->client->monc); |
9bd2e6f8b ceph: allow renew... |
3514 |
} |
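Taken together, these hooks cover the authorizer lifecycle for an MDS connection: get_authorizer() builds (or, with force_new, rebuilds) the per-session authorizer buffers through the auth client, verify_authorizer_reply() checks the MDS's response to that authorizer, and invalidate_authorizer() discards the ticket and revalidates with the monitors so the next connection attempt starts with fresh credentials.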
9e32789f6 ceph: Storage cla... |
3515 |
static const struct ceph_connection_operations mds_con_ops = { |
2f2dc0534 ceph: MDS client |
3516 3517 3518 |
.get = con_get,
.put = con_put,
.dispatch = dispatch, |
4e7a5dcd1 ceph: negotiate a... |
3519 3520 |
.get_authorizer = get_authorizer,
.verify_authorizer_reply = verify_authorizer_reply, |
9bd2e6f8b ceph: allow renew... |
3521 |
.invalidate_authorizer = invalidate_authorizer, |
2f2dc0534 ceph: MDS client |
3522 |
.peer_reset = peer_reset, |
2f2dc0534 ceph: MDS client |
3523 |
}; |
2f2dc0534 ceph: MDS client |
3524 |
/* eof */ |