Blame view
net/ceph/mon_client.c
24.4 KB
3d14c5d2b ceph: factor out ... |
1 |
#include <linux/ceph/ceph_debug.h> |
ba75bb98c ceph: monitor client |
2 |
|
3d14c5d2b ceph: factor out ... |
3 |
#include <linux/module.h> |
ba75bb98c ceph: monitor client |
4 |
#include <linux/types.h> |
5a0e3ad6a include cleanup: ... |
5 |
#include <linux/slab.h> |
ba75bb98c ceph: monitor client |
6 7 |
#include <linux/random.h> #include <linux/sched.h> |
3d14c5d2b ceph: factor out ... |
8 9 10 11 12 |
#include <linux/ceph/mon_client.h> #include <linux/ceph/libceph.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> |
ba75bb98c ceph: monitor client |
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
/* * Interact with Ceph monitor cluster. Handle requests for new map * versions, and periodically resend as needed. Also implement * statfs() and umount(). * * A small cluster of Ceph "monitors" are responsible for managing critical * cluster configuration and state information. An odd number (e.g., 3, 5) * of cmon daemons use a modified version of the Paxos part-time parliament * algorithm to manage the MDS map (mds cluster membership), OSD map, and * list of clients who have mounted the file system. * * We maintain an open, active session with a monitor at all times in order to * receive timely MDSMap updates. We periodically send a keepalive byte on the * TCP socket to ensure we detect a failure. If the connection does break, we * randomly hunt for a new monitor. Once the connection is reestablished, we * resend any outstanding requests. */ |
9e32789f6 ceph: Storage cla... |
31 |
static const struct ceph_connection_operations mon_con_ops; |
ba75bb98c ceph: monitor client |
32 |
|
9bd2e6f8b ceph: allow renew... |
33 |
static int __validate_auth(struct ceph_mon_client *monc); |
ba75bb98c ceph: monitor client |
34 35 36 37 38 39 40 41 42 43 |
/* * Decode a monmap blob (e.g., during mount). */ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) { struct ceph_monmap *m = NULL; int i, err = -EINVAL; struct ceph_fsid fsid; u32 epoch, num_mon; u16 version; |
4e7a5dcd1 ceph: negotiate a... |
44 45 46 47 |
u32 len; ceph_decode_32_safe(&p, end, len, bad); ceph_decode_need(&p, end, len, bad); |
ba75bb98c ceph: monitor client |
48 49 50 51 52 53 54 55 |
dout("monmap_decode %p %p len %d ", p, end, (int)(end-p)); ceph_decode_16_safe(&p, end, version, bad); ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); |
c89136ea4 ceph: convert enc... |
56 |
epoch = ceph_decode_32(&p); |
ba75bb98c ceph: monitor client |
57 |
|
c89136ea4 ceph: convert enc... |
58 |
num_mon = ceph_decode_32(&p); |
ba75bb98c ceph: monitor client |
59 60 61 62 63 64 65 66 67 68 69 |
ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad); if (num_mon >= CEPH_MAX_MON) goto bad; m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS); if (m == NULL) return ERR_PTR(-ENOMEM); m->fsid = fsid; m->epoch = epoch; m->num_mon = num_mon; ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); |
63f2d2119 ceph: use fixed e... |
70 71 |
for (i = 0; i < num_mon; i++) ceph_decode_addr(&m->mon_inst[i].addr); |
ba75bb98c ceph: monitor client |
72 |
|
ba75bb98c ceph: monitor client |
73 74 75 76 77 78 |
dout("monmap_decode epoch %d, num_mon %d ", m->epoch, m->num_mon); for (i = 0; i < m->num_mon; i++) dout("monmap_decode mon%d is %s ", i, |
3d14c5d2b ceph: factor out ... |
79 |
ceph_pr_addr(&m->mon_inst[i].addr.in_addr)); |
ba75bb98c ceph: monitor client |
80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 |
return m; bad: dout("monmap_decode failed with %d ", err); kfree(m); return ERR_PTR(err); } /* * return true if *addr is included in the monmap. */ int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) { int i; for (i = 0; i < m->num_mon; i++) |
103e2d3ae ceph: remove unus... |
97 |
if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0) |
ba75bb98c ceph: monitor client |
98 99 100 101 102 |
return 1; return 0; } /* |
5ce6e9dbe ceph: fix authent... |
103 104 105 106 107 108 109 |
* Send an auth request. */ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) { monc->pending_auth = 1; monc->m_auth->front.iov_len = len; monc->m_auth->hdr.front_len = cpu_to_le32(len); |
970690012 ceph: avoid resen... |
110 |
ceph_con_revoke(monc->con, monc->m_auth); |
5ce6e9dbe ceph: fix authent... |
111 112 113 114 115 |
ceph_msg_get(monc->m_auth); /* keep our ref */ ceph_con_send(monc->con, monc->m_auth); } /* |
ba75bb98c ceph: monitor client |
116 117 118 119 120 121 122 |
* Close monitor session, if any. */ static void __close_session(struct ceph_mon_client *monc) { if (monc->con) { dout("__close_session closing mon%d ", monc->cur_mon); |
4e7a5dcd1 ceph: negotiate a... |
123 |
ceph_con_revoke(monc->con, monc->m_auth); |
ba75bb98c ceph: monitor client |
124 125 |
ceph_con_close(monc->con); monc->cur_mon = -1; |
9bd2e6f8b ceph: allow renew... |
126 |
monc->pending_auth = 0; |
4e7a5dcd1 ceph: negotiate a... |
127 |
ceph_auth_reset(monc->auth); |
ba75bb98c ceph: monitor client |
128 129 130 131 132 133 134 135 136 |
} } /* * Open a session with a (new) monitor. */ static int __open_session(struct ceph_mon_client *monc) { char r; |
4e7a5dcd1 ceph: negotiate a... |
137 |
int ret; |
ba75bb98c ceph: monitor client |
138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 |
if (monc->cur_mon < 0) { get_random_bytes(&r, 1); monc->cur_mon = r % monc->monmap->num_mon; dout("open_session num=%d r=%d -> mon%d ", monc->monmap->num_mon, r, monc->cur_mon); monc->sub_sent = 0; monc->sub_renew_after = jiffies; /* i.e., expired */ monc->want_next_osdmap = !!monc->want_next_osdmap; dout("open_session mon%d opening ", monc->cur_mon); monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON; monc->con->peer_name.num = cpu_to_le64(monc->cur_mon); ceph_con_open(monc->con, &monc->monmap->mon_inst[monc->cur_mon].addr); |
4e7a5dcd1 ceph: negotiate a... |
155 156 157 158 159 |
/* initiatiate authentication handshake */ ret = ceph_auth_build_hello(monc->auth, monc->m_auth->front.iov_base, monc->m_auth->front_max); |
5ce6e9dbe ceph: fix authent... |
160 |
__send_prepared_auth_request(monc, ret); |
ba75bb98c ceph: monitor client |
161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 |
} else { dout("open_session mon%d already open ", monc->cur_mon); } return 0; } static bool __sub_expired(struct ceph_mon_client *monc) { return time_after_eq(jiffies, monc->sub_renew_after); } /* * Reschedule delayed work timer. */ static void __schedule_delayed(struct ceph_mon_client *monc) { unsigned delay; |
4e7a5dcd1 ceph: negotiate a... |
179 |
if (monc->cur_mon < 0 || __sub_expired(monc)) |
ba75bb98c ceph: monitor client |
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 |
delay = 10 * HZ; else delay = 20 * HZ; dout("__schedule_delayed after %u ", delay); schedule_delayed_work(&monc->delayed_work, delay); } /* * Send subscribe request for mdsmap and/or osdmap. */ static void __send_subscribe(struct ceph_mon_client *monc) { dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d ", (unsigned)monc->sub_sent, __sub_expired(monc), monc->want_next_osdmap); if ((__sub_expired(monc) && !monc->sub_sent) || monc->want_next_osdmap == 1) { |
240ed68eb ceph: reuse mon s... |
199 |
struct ceph_msg *msg = monc->m_subscribe; |
ba75bb98c ceph: monitor client |
200 201 |
struct ceph_mon_subscribe_item *i; void *p, *end; |
3d14c5d2b ceph: factor out ... |
202 |
int num; |
ba75bb98c ceph: monitor client |
203 |
|
ba75bb98c ceph: monitor client |
204 |
p = msg->front.iov_base; |
240ed68eb ceph: reuse mon s... |
205 |
end = p + msg->front_max; |
ba75bb98c ceph: monitor client |
206 |
|
3d14c5d2b ceph: factor out ... |
207 208 |
num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap; ceph_encode_32(&p, num); |
ba75bb98c ceph: monitor client |
209 210 211 212 |
if (monc->want_next_osdmap) { dout("__send_subscribe to 'osdmap' %u ", (unsigned)monc->have_osdmap); |
ba75bb98c ceph: monitor client |
213 214 215 216 217 218 |
ceph_encode_string(&p, end, "osdmap", 6); i = p; i->have = cpu_to_le64(monc->have_osdmap); i->onetime = 1; p += sizeof(*i); monc->want_next_osdmap = 2; /* requested */ |
ba75bb98c ceph: monitor client |
219 |
} |
3d14c5d2b ceph: factor out ... |
220 221 222 223 224 225 226 227 228 229 |
if (monc->want_mdsmap) { dout("__send_subscribe to 'mdsmap' %u+ ", (unsigned)monc->have_mdsmap); ceph_encode_string(&p, end, "mdsmap", 6); i = p; i->have = cpu_to_le64(monc->have_mdsmap); i->onetime = 0; p += sizeof(*i); } |
4e7a5dcd1 ceph: negotiate a... |
230 231 232 233 234 |
ceph_encode_string(&p, end, "monmap", 6); i = p; i->have = 0; i->onetime = 0; p += sizeof(*i); |
ba75bb98c ceph: monitor client |
235 236 237 |
msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
240ed68eb ceph: reuse mon s... |
238 239 |
ceph_con_revoke(monc->con, msg); ceph_con_send(monc->con, ceph_msg_get(msg)); |
ba75bb98c ceph: monitor client |
240 241 242 243 244 245 246 247 248 |
monc->sub_sent = jiffies | 1; /* never 0 */ } } static void handle_subscribe_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) { unsigned seconds; |
07bd10fb9 ceph: correct sub... |
249 250 251 252 253 |
struct ceph_mon_subscribe_ack *h = msg->front.iov_base; if (msg->front.iov_len < sizeof(*h)) goto bad; seconds = le32_to_cpu(h->duration); |
ba75bb98c ceph: monitor client |
254 |
|
ba75bb98c ceph: monitor client |
255 256 257 258 |
mutex_lock(&monc->mutex); if (monc->hunting) { pr_info("mon%d %s session established ", |
3d14c5d2b ceph: factor out ... |
259 260 |
monc->cur_mon, ceph_pr_addr(&monc->con->peer_addr.in_addr)); |
ba75bb98c ceph: monitor client |
261 262 263 264 |
monc->hunting = false; } dout("handle_subscribe_ack after %d seconds ", seconds); |
0656d11ba ceph: renew mon s... |
265 |
monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1; |
ba75bb98c ceph: monitor client |
266 267 268 269 270 271 |
monc->sub_sent = 0; mutex_unlock(&monc->mutex); return; bad: pr_err("got corrupt subscribe-ack msg "); |
9ec7cab14 ceph: hex dump co... |
272 |
ceph_msg_dump(msg); |
ba75bb98c ceph: monitor client |
273 274 275 276 277 278 279 280 281 282 283 284 |
} /* * Keep track of which maps we have */ int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) { mutex_lock(&monc->mutex); monc->have_mdsmap = got; mutex_unlock(&monc->mutex); return 0; } |
3d14c5d2b ceph: factor out ... |
285 |
EXPORT_SYMBOL(ceph_monc_got_mdsmap); |
ba75bb98c ceph: monitor client |
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 |
int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) { mutex_lock(&monc->mutex); monc->have_osdmap = got; monc->want_next_osdmap = 0; mutex_unlock(&monc->mutex); return 0; } /* * Register interest in the next osdmap */ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) { dout("request_next_osdmap have %u ", monc->have_osdmap); mutex_lock(&monc->mutex); if (!monc->want_next_osdmap) monc->want_next_osdmap = 1; if (monc->want_next_osdmap < 2) __send_subscribe(monc); mutex_unlock(&monc->mutex); } |
4e7a5dcd1 ceph: negotiate a... |
310 |
/* |
50b885b96 ceph: whitespace ... |
311 |
* |
4e7a5dcd1 ceph: negotiate a... |
312 313 |
*/ int ceph_monc_open_session(struct ceph_mon_client *monc) |
ba75bb98c ceph: monitor client |
314 315 316 317 318 319 320 321 322 323 324 |
{ if (!monc->con) { monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL); if (!monc->con) return -ENOMEM; ceph_con_init(monc->client->msgr, monc->con); monc->con->private = monc; monc->con->ops = &mon_con_ops; } mutex_lock(&monc->mutex); |
4e7a5dcd1 ceph: negotiate a... |
325 |
__open_session(monc); |
ba75bb98c ceph: monitor client |
326 327 328 329 |
__schedule_delayed(monc); mutex_unlock(&monc->mutex); return 0; } |
3d14c5d2b ceph: factor out ... |
330 |
EXPORT_SYMBOL(ceph_monc_open_session); |
ba75bb98c ceph: monitor client |
331 |
|
4e7a5dcd1 ceph: negotiate a... |
332 333 334 335 |
/* * The monitor responds with mount ack indicate mount success. The * included client ticket allows the client to talk to MDSs and OSDs. */ |
0743304d8 ceph: fix debugfs... |
336 337 |
static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_msg *msg) |
4e7a5dcd1 ceph: negotiate a... |
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 |
{ struct ceph_client *client = monc->client; struct ceph_monmap *monmap = NULL, *old = monc->monmap; void *p, *end; mutex_lock(&monc->mutex); dout("handle_monmap "); p = msg->front.iov_base; end = p + msg->front.iov_len; monmap = ceph_monmap_decode(p, end); if (IS_ERR(monmap)) { pr_err("problem decoding monmap, %d ", (int)PTR_ERR(monmap)); |
d4a780ce8 ceph: fix leak of... |
355 |
goto out; |
4e7a5dcd1 ceph: negotiate a... |
356 |
} |
0743304d8 ceph: fix debugfs... |
357 358 |
if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) { |
4e7a5dcd1 ceph: negotiate a... |
359 |
kfree(monmap); |
d4a780ce8 ceph: fix leak of... |
360 |
goto out; |
4e7a5dcd1 ceph: negotiate a... |
361 362 363 |
} client->monc.monmap = monmap; |
4e7a5dcd1 ceph: negotiate a... |
364 |
kfree(old); |
d4a780ce8 ceph: fix leak of... |
365 |
out: |
4e7a5dcd1 ceph: negotiate a... |
366 |
mutex_unlock(&monc->mutex); |
03066f234 ceph: use complet... |
367 |
wake_up_all(&client->auth_wq); |
4e7a5dcd1 ceph: negotiate a... |
368 |
} |
ba75bb98c ceph: monitor client |
369 |
/* |
e56fa10e9 ceph: generalize ... |
370 |
* generic requests (e.g., statfs, poolop) |
ba75bb98c ceph: monitor client |
371 |
*/ |
f8c76f6f2 ceph: make mon cl... |
372 |
static struct ceph_mon_generic_request *__lookup_generic_req( |
85ff03f6b ceph: use rbtree ... |
373 374 |
struct ceph_mon_client *monc, u64 tid) { |
f8c76f6f2 ceph: make mon cl... |
375 376 |
struct ceph_mon_generic_request *req; struct rb_node *n = monc->generic_request_tree.rb_node; |
85ff03f6b ceph: use rbtree ... |
377 378 |
while (n) { |
f8c76f6f2 ceph: make mon cl... |
379 |
req = rb_entry(n, struct ceph_mon_generic_request, node); |
85ff03f6b ceph: use rbtree ... |
380 381 382 383 384 385 386 387 388 |
if (tid < req->tid) n = n->rb_left; else if (tid > req->tid) n = n->rb_right; else return req; } return NULL; } |
f8c76f6f2 ceph: make mon cl... |
389 390 |
static void __insert_generic_request(struct ceph_mon_client *monc, struct ceph_mon_generic_request *new) |
85ff03f6b ceph: use rbtree ... |
391 |
{ |
f8c76f6f2 ceph: make mon cl... |
392 |
struct rb_node **p = &monc->generic_request_tree.rb_node; |
85ff03f6b ceph: use rbtree ... |
393 |
struct rb_node *parent = NULL; |
f8c76f6f2 ceph: make mon cl... |
394 |
struct ceph_mon_generic_request *req = NULL; |
85ff03f6b ceph: use rbtree ... |
395 396 397 |
while (*p) { parent = *p; |
f8c76f6f2 ceph: make mon cl... |
398 |
req = rb_entry(parent, struct ceph_mon_generic_request, node); |
85ff03f6b ceph: use rbtree ... |
399 400 401 402 403 404 405 406 407 |
if (new->tid < req->tid) p = &(*p)->rb_left; else if (new->tid > req->tid) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->node, parent, p); |
f8c76f6f2 ceph: make mon cl... |
408 |
rb_insert_color(&new->node, &monc->generic_request_tree); |
85ff03f6b ceph: use rbtree ... |
409 |
} |
f8c76f6f2 ceph: make mon cl... |
410 |
static void release_generic_request(struct kref *kref) |
3143edd3a ceph: clean up st... |
411 |
{ |
f8c76f6f2 ceph: make mon cl... |
412 413 |
struct ceph_mon_generic_request *req = container_of(kref, struct ceph_mon_generic_request, kref); |
3143edd3a ceph: clean up st... |
414 415 416 417 418 |
if (req->reply) ceph_msg_put(req->reply); if (req->request) ceph_msg_put(req->request); |
205475679 ceph: fix memory ... |
419 420 |
kfree(req); |
3143edd3a ceph: clean up st... |
421 |
} |
f8c76f6f2 ceph: make mon cl... |
422 |
static void put_generic_request(struct ceph_mon_generic_request *req) |
3143edd3a ceph: clean up st... |
423 |
{ |
f8c76f6f2 ceph: make mon cl... |
424 |
kref_put(&req->kref, release_generic_request); |
3143edd3a ceph: clean up st... |
425 |
} |
f8c76f6f2 ceph: make mon cl... |
426 |
static void get_generic_request(struct ceph_mon_generic_request *req) |
3143edd3a ceph: clean up st... |
427 428 429 |
{ kref_get(&req->kref); } |
f8c76f6f2 ceph: make mon cl... |
430 |
static struct ceph_msg *get_generic_reply(struct ceph_connection *con, |
3143edd3a ceph: clean up st... |
431 432 433 434 |
struct ceph_msg_header *hdr, int *skip) { struct ceph_mon_client *monc = con->private; |
f8c76f6f2 ceph: make mon cl... |
435 |
struct ceph_mon_generic_request *req; |
3143edd3a ceph: clean up st... |
436 437 438 439 |
u64 tid = le64_to_cpu(hdr->tid); struct ceph_msg *m; mutex_lock(&monc->mutex); |
f8c76f6f2 ceph: make mon cl... |
440 |
req = __lookup_generic_req(monc, tid); |
3143edd3a ceph: clean up st... |
441 |
if (!req) { |
f8c76f6f2 ceph: make mon cl... |
442 443 |
dout("get_generic_reply %lld dne ", tid); |
3143edd3a ceph: clean up st... |
444 445 446 |
*skip = 1; m = NULL; } else { |
f8c76f6f2 ceph: make mon cl... |
447 448 |
dout("get_generic_reply %lld got %p ", tid, req->reply); |
3143edd3a ceph: clean up st... |
449 450 451 452 453 454 455 456 457 458 |
m = ceph_msg_get(req->reply); /* * we don't need to track the connection reading into * this reply because we only have one open connection * at a time, ever. */ } mutex_unlock(&monc->mutex); return m; } |
e56fa10e9 ceph: generalize ... |
459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 |
/*
 * Register a generic request, send it, and block until the reply
 * handler completes it.  Returns the handler-set result, or a
 * negative errno if the wait was interrupted.
 */
static int do_generic_request(struct ceph_mon_client *monc,
			      struct ceph_mon_generic_request *req)
{
	int err;

	/* register request */
	mutex_lock(&monc->mutex);
	req->tid = ++monc->last_tid;
	req->request->hdr.tid = cpu_to_le64(req->tid);
	__insert_generic_request(monc, req);
	monc->num_generic_requests++;
	ceph_con_send(monc->con, ceph_msg_get(req->request));
	mutex_unlock(&monc->mutex);

	/* NOTE(review): an interrupted wait unregisters the request
	 * below while the reply may still arrive; the reply handler
	 * tolerates an unknown tid, so this is best-effort by design */
	err = wait_for_completion_interruptible(&req->completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req->node, &monc->generic_request_tree);
	monc->num_generic_requests--;
	mutex_unlock(&monc->mutex);

	if (!err)
		err = req->result;
	return err;
}

/*
 * statfs
 */
ba75bb98c ceph: monitor client |
488 489 490 |
static void handle_statfs_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { |
f8c76f6f2 ceph: make mon cl... |
491 |
struct ceph_mon_generic_request *req; |
ba75bb98c ceph: monitor client |
492 |
struct ceph_mon_statfs_reply *reply = msg->front.iov_base; |
3143edd3a ceph: clean up st... |
493 |
u64 tid = le64_to_cpu(msg->hdr.tid); |
ba75bb98c ceph: monitor client |
494 495 496 |
if (msg->front.iov_len != sizeof(*reply)) goto bad; |
ba75bb98c ceph: monitor client |
497 498 499 500 |
dout("handle_statfs_reply %p tid %llu ", msg, tid); mutex_lock(&monc->mutex); |
f8c76f6f2 ceph: make mon cl... |
501 |
req = __lookup_generic_req(monc, tid); |
ba75bb98c ceph: monitor client |
502 |
if (req) { |
f8c76f6f2 ceph: make mon cl... |
503 |
*(struct ceph_statfs *)req->buf = reply->st; |
ba75bb98c ceph: monitor client |
504 |
req->result = 0; |
f8c76f6f2 ceph: make mon cl... |
505 |
get_generic_request(req); |
ba75bb98c ceph: monitor client |
506 507 |
} mutex_unlock(&monc->mutex); |
3143edd3a ceph: clean up st... |
508 |
if (req) { |
03066f234 ceph: use complet... |
509 |
complete_all(&req->completion); |
f8c76f6f2 ceph: make mon cl... |
510 |
put_generic_request(req); |
3143edd3a ceph: clean up st... |
511 |
} |
ba75bb98c ceph: monitor client |
512 513 514 |
return; bad: |
e56fa10e9 ceph: generalize ... |
515 516 |
pr_err("corrupt generic reply, tid %llu ", tid); |
9ec7cab14 ceph: hex dump co... |
517 |
ceph_msg_dump(msg); |
ba75bb98c ceph: monitor client |
518 519 520 |
} /* |
3143edd3a ceph: clean up st... |
521 |
* Do a synchronous statfs(). |
ba75bb98c ceph: monitor client |
522 |
*/ |
3143edd3a ceph: clean up st... |
523 |
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) |
ba75bb98c ceph: monitor client |
524 |
{ |
f8c76f6f2 ceph: make mon cl... |
525 |
struct ceph_mon_generic_request *req; |
ba75bb98c ceph: monitor client |
526 |
struct ceph_mon_statfs *h; |
3143edd3a ceph: clean up st... |
527 |
int err; |
cffe7b6d8 ceph: Use kzalloc |
528 |
req = kzalloc(sizeof(*req), GFP_NOFS); |
3143edd3a ceph: clean up st... |
529 530 |
if (!req) return -ENOMEM; |
3143edd3a ceph: clean up st... |
531 532 |
kref_init(&req->kref); req->buf = buf; |
e56fa10e9 ceph: generalize ... |
533 |
req->buf_len = sizeof(*buf); |
3143edd3a ceph: clean up st... |
534 |
init_completion(&req->completion); |
ba75bb98c ceph: monitor client |
535 |
|
a79832f26 ceph: make ceph_m... |
536 |
err = -ENOMEM; |
34d23762d ceph: all allocat... |
537 |
req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS); |
a79832f26 ceph: make ceph_m... |
538 |
if (!req->request) |
3143edd3a ceph: clean up st... |
539 |
goto out; |
34d23762d ceph: all allocat... |
540 |
req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS); |
a79832f26 ceph: make ceph_m... |
541 |
if (!req->reply) |
3143edd3a ceph: clean up st... |
542 |
goto out; |
3143edd3a ceph: clean up st... |
543 544 545 |
/* fill out request */ h = req->request->front.iov_base; |
13e38c8ae ceph: update to m... |
546 547 548 |
h->monhdr.have_version = 0; h->monhdr.session_mon = cpu_to_le16(-1); h->monhdr.session_mon_tid = 0; |
ba75bb98c ceph: monitor client |
549 |
h->fsid = monc->monmap->fsid; |
ba75bb98c ceph: monitor client |
550 |
|
e56fa10e9 ceph: generalize ... |
551 |
err = do_generic_request(monc, req); |
ba75bb98c ceph: monitor client |
552 |
|
e56fa10e9 ceph: generalize ... |
553 554 555 556 |
out: kref_put(&req->kref, release_generic_request); return err; } |
3d14c5d2b ceph: factor out ... |
557 |
EXPORT_SYMBOL(ceph_monc_do_statfs); |
e56fa10e9 ceph: generalize ... |
558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 |
/* * pool ops */ static int get_poolop_reply_buf(const char *src, size_t src_len, char *dst, size_t dst_len) { u32 buf_len; if (src_len != sizeof(u32) + dst_len) return -EINVAL; buf_len = le32_to_cpu(*(u32 *)src); if (buf_len != dst_len) return -EINVAL; memcpy(dst, src + sizeof(u32), dst_len); return 0; } static void handle_poolop_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { struct ceph_mon_generic_request *req; struct ceph_mon_poolop_reply *reply = msg->front.iov_base; u64 tid = le64_to_cpu(msg->hdr.tid); if (msg->front.iov_len < sizeof(*reply)) goto bad; dout("handle_poolop_reply %p tid %llu ", msg, tid); |
ba75bb98c ceph: monitor client |
589 590 |
mutex_lock(&monc->mutex); |
e56fa10e9 ceph: generalize ... |
591 592 593 594 595 596 597 598 599 600 601 602 |
req = __lookup_generic_req(monc, tid); if (req) { if (req->buf_len && get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply), msg->front.iov_len - sizeof(*reply), req->buf, req->buf_len) < 0) { mutex_unlock(&monc->mutex); goto bad; } req->result = le32_to_cpu(reply->reply_code); get_generic_request(req); } |
ba75bb98c ceph: monitor client |
603 |
mutex_unlock(&monc->mutex); |
e56fa10e9 ceph: generalize ... |
604 605 606 607 608 |
if (req) { complete(&req->completion); put_generic_request(req); } return; |
ba75bb98c ceph: monitor client |
609 |
|
e56fa10e9 ceph: generalize ... |
610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 |
bad: pr_err("corrupt generic reply, tid %llu ", tid); ceph_msg_dump(msg); } /* * Do a synchronous pool op. */ int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op, u32 pool, u64 snapid, char *buf, int len) { struct ceph_mon_generic_request *req; struct ceph_mon_poolop *h; int err; req = kzalloc(sizeof(*req), GFP_NOFS); if (!req) return -ENOMEM; kref_init(&req->kref); req->buf = buf; req->buf_len = len; init_completion(&req->completion); err = -ENOMEM; req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS); if (!req->request) goto out; req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS); if (!req->reply) goto out; /* fill out request */ req->request->hdr.version = cpu_to_le16(2); h = req->request->front.iov_base; h->monhdr.have_version = 0; h->monhdr.session_mon = cpu_to_le16(-1); h->monhdr.session_mon_tid = 0; h->fsid = monc->monmap->fsid; h->pool = cpu_to_le32(pool); h->op = cpu_to_le32(op); h->auid = 0; h->snapid = cpu_to_le64(snapid); h->name_len = 0; err = do_generic_request(monc, req); |
3143edd3a ceph: clean up st... |
658 659 |
out: |
f8c76f6f2 ceph: make mon cl... |
660 |
kref_put(&req->kref, release_generic_request); |
ba75bb98c ceph: monitor client |
661 662 |
return err; } |
e56fa10e9 ceph: generalize ... |
663 664 665 666 667 668 669 |
int ceph_monc_create_snapid(struct ceph_mon_client *monc, u32 pool, u64 *snapid) { return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, pool, 0, (char *)snapid, sizeof(*snapid)); } |
3d14c5d2b ceph: factor out ... |
670 |
EXPORT_SYMBOL(ceph_monc_create_snapid); |
e56fa10e9 ceph: generalize ... |
671 672 673 674 675 676 677 678 |
int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
			    u32 pool, u64 snapid)
{
	/*
	 * BUG FIX: this previously sent POOL_OP_CREATE_UNMANAGED_SNAP,
	 * so "delete" would create another snapshot instead of
	 * deleting the given one.
	 */
	return ceph_monc_do_poolop(monc, POOL_OP_DELETE_UNMANAGED_SNAP,
				   pool, snapid, 0, 0);
}
ba75bb98c ceph: monitor client |
679 |
/* |
e56fa10e9 ceph: generalize ... |
680 |
* Resend pending generic requests. |
ba75bb98c ceph: monitor client |
681 |
*/ |
f8c76f6f2 ceph: make mon cl... |
682 |
static void __resend_generic_request(struct ceph_mon_client *monc) |
ba75bb98c ceph: monitor client |
683 |
{ |
f8c76f6f2 ceph: make mon cl... |
684 |
struct ceph_mon_generic_request *req; |
85ff03f6b ceph: use rbtree ... |
685 |
struct rb_node *p; |
ba75bb98c ceph: monitor client |
686 |
|
f8c76f6f2 ceph: make mon cl... |
687 688 |
for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) { req = rb_entry(p, struct ceph_mon_generic_request, node); |
970690012 ceph: avoid resen... |
689 |
ceph_con_revoke(monc->con, req->request); |
3143edd3a ceph: clean up st... |
690 |
ceph_con_send(monc->con, ceph_msg_get(req->request)); |
ba75bb98c ceph: monitor client |
691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 |
} } /* * Delayed work. If we haven't mounted yet, retry. Otherwise, * renew/retry subscription as needed (in case it is timing out, or we * got an ENOMEM). And keep the monitor connection alive. */ static void delayed_work(struct work_struct *work) { struct ceph_mon_client *monc = container_of(work, struct ceph_mon_client, delayed_work.work); dout("monc delayed_work "); mutex_lock(&monc->mutex); |
4e7a5dcd1 ceph: negotiate a... |
707 708 709 |
if (monc->hunting) { __close_session(monc); __open_session(monc); /* continue hunting */ |
ba75bb98c ceph: monitor client |
710 |
} else { |
4e7a5dcd1 ceph: negotiate a... |
711 |
ceph_con_keepalive(monc->con); |
9bd2e6f8b ceph: allow renew... |
712 713 |
__validate_auth(monc); |
4e7a5dcd1 ceph: negotiate a... |
714 715 |
if (monc->auth->ops->is_authenticated(monc->auth)) __send_subscribe(monc); |
ba75bb98c ceph: monitor client |
716 |
} |
ba75bb98c ceph: monitor client |
717 718 719 |
__schedule_delayed(monc); mutex_unlock(&monc->mutex); } |
6b8051855 ceph: allocate an... |
720 721 722 723 724 725 |
/* * On startup, we build a temporary monmap populated with the IPs * provided by mount(2). */ static int build_initial_monmap(struct ceph_mon_client *monc) { |
3d14c5d2b ceph: factor out ... |
726 727 728 |
struct ceph_options *opt = monc->client->options; struct ceph_entity_addr *mon_addr = opt->mon_addr; int num_mon = opt->num_mon; |
6b8051855 ceph: allocate an... |
729 730 731 732 733 734 735 736 737 738 |
int i; /* build initial monmap */ monc->monmap = kzalloc(sizeof(*monc->monmap) + num_mon*sizeof(monc->monmap->mon_inst[0]), GFP_KERNEL); if (!monc->monmap) return -ENOMEM; for (i = 0; i < num_mon; i++) { monc->monmap->mon_inst[i].addr = mon_addr[i]; |
6b8051855 ceph: allocate an... |
739 740 741 742 743 744 |
monc->monmap->mon_inst[i].addr.nonce = 0; monc->monmap->mon_inst[i].name.type = CEPH_ENTITY_TYPE_MON; monc->monmap->mon_inst[i].name.num = cpu_to_le64(i); } monc->monmap->num_mon = num_mon; |
4e7a5dcd1 ceph: negotiate a... |
745 |
monc->have_fsid = false; |
6b8051855 ceph: allocate an... |
746 747 |
return 0; } |
ba75bb98c ceph: monitor client |
748 749 750 751 752 753 754 755 756 757 |
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) { int err = 0; dout("init "); memset(monc, 0, sizeof(*monc)); monc->client = cl; monc->monmap = NULL; mutex_init(&monc->mutex); |
6b8051855 ceph: allocate an... |
758 759 760 |
err = build_initial_monmap(monc); if (err) goto out; |
ba75bb98c ceph: monitor client |
761 |
monc->con = NULL; |
4e7a5dcd1 ceph: negotiate a... |
762 |
/* authentication */ |
3d14c5d2b ceph: factor out ... |
763 |
monc->auth = ceph_auth_init(cl->options->name, |
8323c3aa7 ceph: Move secret... |
764 |
cl->options->key); |
4e7a5dcd1 ceph: negotiate a... |
765 766 767 768 769 |
if (IS_ERR(monc->auth)) return PTR_ERR(monc->auth); monc->auth->want_keys = CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; |
240ed68eb ceph: reuse mon s... |
770 |
/* msgs */ |
a79832f26 ceph: make ceph_m... |
771 |
err = -ENOMEM; |
7c315c552 ceph: drop unnece... |
772 |
monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK, |
34d23762d ceph: all allocat... |
773 774 |
sizeof(struct ceph_mon_subscribe_ack), GFP_NOFS); |
a79832f26 ceph: make ceph_m... |
775 |
if (!monc->m_subscribe_ack) |
4e7a5dcd1 ceph: negotiate a... |
776 |
goto out_monmap; |
6694d6b95 ceph: drop unnece... |
777 |
|
240ed68eb ceph: reuse mon s... |
778 779 780 |
monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS); if (!monc->m_subscribe) goto out_subscribe_ack; |
34d23762d ceph: all allocat... |
781 |
monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS); |
a79832f26 ceph: make ceph_m... |
782 |
if (!monc->m_auth_reply) |
240ed68eb ceph: reuse mon s... |
783 |
goto out_subscribe; |
4e7a5dcd1 ceph: negotiate a... |
784 |
|
34d23762d ceph: all allocat... |
785 |
monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS); |
9bd2e6f8b ceph: allow renew... |
786 |
monc->pending_auth = 0; |
a79832f26 ceph: make ceph_m... |
787 |
if (!monc->m_auth) |
6694d6b95 ceph: drop unnece... |
788 |
goto out_auth_reply; |
ba75bb98c ceph: monitor client |
789 790 |
monc->cur_mon = -1; |
4e7a5dcd1 ceph: negotiate a... |
791 |
monc->hunting = true; |
ba75bb98c ceph: monitor client |
792 793 794 795 |
monc->sub_renew_after = jiffies; monc->sub_sent = 0; INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); |
f8c76f6f2 ceph: make mon cl... |
796 797 |
monc->generic_request_tree = RB_ROOT; monc->num_generic_requests = 0; |
ba75bb98c ceph: monitor client |
798 799 800 801 802 |
monc->last_tid = 0; monc->have_mdsmap = 0; monc->have_osdmap = 0; monc->want_next_osdmap = 1; |
4e7a5dcd1 ceph: negotiate a... |
803 |
return 0; |
6694d6b95 ceph: drop unnece... |
804 805 |
out_auth_reply: ceph_msg_put(monc->m_auth_reply); |
240ed68eb ceph: reuse mon s... |
806 807 |
out_subscribe: ceph_msg_put(monc->m_subscribe); |
7c315c552 ceph: drop unnece... |
808 809 |
out_subscribe_ack: ceph_msg_put(monc->m_subscribe_ack); |
4e7a5dcd1 ceph: negotiate a... |
810 811 |
out_monmap: kfree(monc->monmap); |
ba75bb98c ceph: monitor client |
812 813 814 |
out: return err; } |
3d14c5d2b ceph: factor out ... |
815 |
EXPORT_SYMBOL(ceph_monc_init); |
ba75bb98c ceph: monitor client |
816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 |
/*
 * Tear down a monitor client.  Stops the periodic delayed work, closes
 * the mon session, and releases the connection, auth state, the
 * preallocated messages taken in ceph_monc_init(), and the monmap.
 */
void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop ");
	/* ensure no delayed work is pending or running before teardown */
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	if (monc->con) {
		/* break the back-pointer first so callbacks see no client,
		 * then drop our reference on the connection */
		monc->con->private = NULL;
		monc->con->ops->put(monc->con);
		monc->con = NULL;
	}
	mutex_unlock(&monc->mutex);

	ceph_auth_destroy(monc->auth);

	/* drop the refs on the messages preallocated at init time */
	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);
ba75bb98c ceph: monitor client |
841 |
|
4e7a5dcd1 ceph: negotiate a... |
842 843 844 845 |
/*
 * Handle an incoming CEPH_MSG_AUTH_REPLY from the monitor.  Feeds the
 * reply to the auth layer and, depending on the result:
 *   ret < 0: fatal auth failure -- record it and wake the mount waiter;
 *   ret > 0: handshake continues -- send the next prepared auth request
 *            (ret is its length, already built into m_auth);
 *   ret == 0 and we just *became* authenticated -- set our entity name
 *            and start the session (subscribe + resend pending requests).
 */
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	/* snapshot pre-reply auth state so we only start the session on
	 * the not-authenticated -> authenticated transition below */
	if (monc->auth->ops)
		was_auth = monc->auth->ops->is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_max);
	if (ret < 0) {
		/* fatal: report the error and wake anyone blocked on auth */
		monc->client->auth_err = ret;
		wake_up_all(&monc->client->auth_wq);
	} else if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
	} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
		dout("authenticated, starting session ");

		monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr->inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);
	}
	mutex_unlock(&monc->mutex);
}
9bd2e6f8b ceph: allow renew... |
874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 |
/*
 * Renew our authentication if the auth layer says it is needed (e.g.
 * ticket expiry): build a fresh auth request into m_auth and send it.
 * Caller must hold monc->mutex.
 *
 * Returns 0 on success or when nothing needed sending, <0 on error.
 */
static int __validate_auth(struct ceph_mon_client *monc)
{
	int len;

	/* an auth exchange is already in flight; don't start another */
	if (monc->pending_auth)
		return 0;

	len = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_max);
	if (len > 0) {
		__send_prepared_auth_request(monc, len);
		return 0;
	}
	/* either an error (<0), or no need to authenticate (0) */
	return len;
}

/*
 * Locked wrapper around __validate_auth() for external callers.
 */
int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int rc;

	mutex_lock(&monc->mutex);
	rc = __validate_auth(monc);
	mutex_unlock(&monc->mutex);

	return rc;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);
9bd2e6f8b ceph: allow renew... |
899 |
|
ba75bb98c ceph: monitor client |
900 901 902 903 904 905 906 907 908 909 910 911 |
/*
 * handle incoming message
 *
 * Route a fully-received message from the mon connection to the right
 * handler; unknown types are offered to the client's chained dispatch
 * hook before being reported.  Consumes the msg reference.
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_POOLOP_REPLY:
		handle_poolop_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s ",
		       type, ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

/*
 * Allocate memory for incoming message
 *
 * Messenger callback: return a ceph_msg to receive an incoming frame
 * into.  Replies with preallocated buffers hand back a new ref on the
 * stashed msg; map messages get a fresh allocation sized from the
 * header.  Sets *skip when the message should be dropped instead.
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_POOLOP_REPLY:
	case CEPH_MSG_STATFS_REPLY:
		/* reply buffers for generic requests live in the request
		 * tree; look the tid up there */
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS);
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d ",
			type);
		*skip = 1;
	}
	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault ");
	mutex_lock(&monc->mutex);
	if (!con->private)
		goto out;

	if (monc->con && !monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon ", monc->cur_mon,
			ceph_pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}
9e32789f6 ceph: Storage cla... |
1020 |
/*
 * Connection callbacks the ceph messenger invokes for the mon session:
 * refcounting, message dispatch, fault recovery, and incoming-message
 * buffer allocation.
 */
static const struct ceph_connection_operations mon_con_ops = {
	.get = ceph_con_get,
	.put = ceph_con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};