net/ceph/mon_client.c

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>

/*
 * Interact with Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */

static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);
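
/*
 * Wire layout of the monmap blob, as inferred from ceph_monmap_decode()
 * below (a summary of this file's logic, not a normative spec): a __le32
 * total length, a __le16 encoding version, the ceph_fsid, a __le32 epoch,
 * a __le32 monitor count, then num_mon ceph_entity_inst records.
 */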

/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode  mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}

/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}

/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_con_revoke(monc->con, monc->m_auth);
	ceph_msg_get(monc->m_auth);  /* keep our ref */
	ceph_con_send(monc->con, monc->m_auth);
}

/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_con_revoke(monc->con, monc->m_auth);
	ceph_con_close(monc->con);
	monc->cur_mon = -1;
	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}

/*
 * Open a session with a (new) monitor.
 */
static int __open_session(struct ceph_mon_client *monc)
{
	char r;
	int ret;

	if (monc->cur_mon < 0) {
		get_random_bytes(&r, 1);
		monc->cur_mon = r % monc->monmap->num_mon;
		dout("open_session num=%d r=%d -> mon%d\n",
		     monc->monmap->num_mon, r, monc->cur_mon);
		monc->sub_sent = 0;
		monc->sub_renew_after = jiffies;  /* i.e., expired */
		monc->want_next_osdmap = !!monc->want_next_osdmap;

		dout("open_session mon%d opening\n", monc->cur_mon);
		monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
		monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
		ceph_con_open(monc->con,
			      &monc->monmap->mon_inst[monc->cur_mon].addr);

		/* initiate authentication handshake */
		ret = ceph_auth_build_hello(monc->auth,
					    monc->m_auth->front.iov_base,
					    monc->m_auth->front_max);
		__send_prepared_auth_request(monc, ret);
	} else {
		dout("open_session mon%d already open\n", monc->cur_mon);
	}
	return 0;
}

static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}

/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned delay;

	if (monc->cur_mon < 0 || __sub_expired(monc))
		delay = 10 * HZ;
	else
		delay = 20 * HZ;
	dout("__schedule_delayed after %u\n", delay);
	schedule_delayed_work(&monc->delayed_work, delay);
}
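
/*
 * Subscription state, as used below (a summary of this file's logic):
 * want_next_osdmap is 0 when we don't care, 1 when we want the next
 * osdmap, and 2 once the request has been sent.  The subscribe payload
 * is a __le32 item count followed by, per item, a length-prefixed name
 * string and a ceph_mon_subscribe_item giving the version we already
 * have and a onetime flag.
 */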

/*
 * Send subscribe request for mdsmap and/or osdmap.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
	     (unsigned)monc->sub_sent, __sub_expired(monc),
	     monc->want_next_osdmap);
	if ((__sub_expired(monc) && !monc->sub_sent) ||
	    monc->want_next_osdmap == 1) {
		struct ceph_msg *msg = monc->m_subscribe;
		struct ceph_mon_subscribe_item *i;
		void *p, *end;
		int num;

		p = msg->front.iov_base;
		end = p + msg->front_max;

		num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
		ceph_encode_32(&p, num);

		if (monc->want_next_osdmap) {
			dout("__send_subscribe to 'osdmap' %u\n",
			     (unsigned)monc->have_osdmap);
			ceph_encode_string(&p, end, "osdmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_osdmap);
			i->onetime = 1;
			p += sizeof(*i);
			monc->want_next_osdmap = 2;  /* requested */
		}
		if (monc->want_mdsmap) {
			dout("__send_subscribe to 'mdsmap' %u+\n",
			     (unsigned)monc->have_mdsmap);
			ceph_encode_string(&p, end, "mdsmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_mdsmap);
			i->onetime = 0;
			p += sizeof(*i);
		}
		ceph_encode_string(&p, end, "monmap", 6);
		i = p;
		i->have = 0;
		i->onetime = 0;
		p += sizeof(*i);

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		ceph_con_revoke(monc->con, msg);
		ceph_con_send(monc->con, ceph_msg_get(msg));

		monc->sub_sent = jiffies | 1;  /* never 0 */
	}
}
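
/*
 * On a subscribe ack, renewal is scheduled roughly halfway through the
 * duration the monitor granted (see the sub_renew_after computation
 * below).
 */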
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		pr_info("mon%d %s session established\n",
			monc->cur_mon,
			ceph_pr_addr(&monc->con->peer_addr.in_addr));
		monc->hunting = false;
	}
	dout("handle_subscribe_ack after %d seconds\n", seconds);
	monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
	monc->sub_sent = 0;
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}

/*
 * Keep track of which maps we have
 */
int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_mdsmap = got;
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_got_mdsmap);

int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_osdmap = got;
	monc->want_next_osdmap = 0;
	mutex_unlock(&monc->mutex);
	return 0;
}

/*
 * Register interest in the next osdmap
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("request_next_osdmap have %u\n", monc->have_osdmap);
	mutex_lock(&monc->mutex);
	if (!monc->want_next_osdmap)
		monc->want_next_osdmap = 1;
	if (monc->want_next_osdmap < 2)
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}

int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);

/*
 * The monitor responds with a mount ack to indicate mount success.  The
 * included client ticket allows the client to talk to MDSs and OSDs.
 */
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
}

/*
 * generic requests (e.g., statfs, poolop)
 */
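/*
 * Lifecycle, as implemented below: a generic request gets a fresh tid,
 * is inserted into monc->generic_request_tree, and is sent; the reply
 * handler looks the tid up, fills req->buf, and completes
 * req->completion, after which do_generic_request() unlinks the node.
 */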
static struct ceph_mon_generic_request *__lookup_generic_req(
	struct ceph_mon_client *monc, u64 tid)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *n = monc->generic_request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mon_generic_request, node);
		if (tid < req->tid)
			n = n->rb_left;
		else if (tid > req->tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static void __insert_generic_request(struct ceph_mon_client *monc,
				     struct ceph_mon_generic_request *new)
{
	struct rb_node **p = &monc->generic_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_generic_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mon_generic_request, node);
		if (new->tid < req->tid)
			p = &(*p)->rb_left;
		else if (new->tid > req->tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->generic_request_tree);
}

static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}

static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}

static int do_generic_request(struct ceph_mon_client *monc,
			      struct ceph_mon_generic_request *req)
{
	int err;

	/* register request */
	mutex_lock(&monc->mutex);
	req->tid = ++monc->last_tid;
	req->request->hdr.tid = cpu_to_le64(req->tid);
	__insert_generic_request(monc, req);
	monc->num_generic_requests++;
	ceph_con_send(monc->con, ceph_msg_get(req->request));
	mutex_unlock(&monc->mutex);

	err = wait_for_completion_interruptible(&req->completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req->node, &monc->generic_request_tree);
	monc->num_generic_requests--;
	mutex_unlock(&monc->mutex);

	if (!err)
		err = req->result;
	return err;
}
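
/*
 * Typical caller pattern (see ceph_monc_do_statfs() below): kzalloc a
 * ceph_mon_generic_request, kref_init it, allocate the request and
 * reply messages, call do_generic_request(), then kref_put() the
 * request when done.
 */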

/*
 * statfs
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		*(struct ceph_statfs *)req->buf = reply->st;
		req->result = 0;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt generic reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	req->buf_len = sizeof(*buf);
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
				  true);
	if (!req->reply)
		goto out;

	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;

	err = do_generic_request(monc, req);

out:
	kref_put(&req->kref, release_generic_request);
	return err;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
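
/*
 * Usage sketch (illustrative, not code from this file): a filesystem's
 * statfs handler could call
 *
 *	struct ceph_statfs st;
 *	int err = ceph_monc_do_statfs(&client->monc, &st);
 *	if (err < 0)
 *		return err;
 *
 * where "client" is the owning struct ceph_client.
 */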

/*
 * pool ops
 */
static int get_poolop_reply_buf(const char *src, size_t src_len,
				char *dst, size_t dst_len)
{
	u32 buf_len;

	if (src_len != sizeof(u32) + dst_len)
		return -EINVAL;

	buf_len = le32_to_cpu(*(u32 *)src);
	if (buf_len != dst_len)
		return -EINVAL;

	memcpy(dst, src + sizeof(u32), dst_len);
	return 0;
}

static void handle_poolop_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len < sizeof(*reply))
		goto bad;
	dout("handle_poolop_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		if (req->buf_len &&
		    get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
				     msg->front.iov_len - sizeof(*reply),
				     req->buf, req->buf_len) < 0) {
			mutex_unlock(&monc->mutex);
			goto bad;
		}
		req->result = le32_to_cpu(reply->reply_code);
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt generic reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}
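
/*
 * A poolop reply is the ceph_mon_poolop_reply header followed by an
 * optional payload framed as a __le32 length plus data;
 * get_poolop_reply_buf() above strips that length word before copying
 * into the caller's buffer.
 */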

/*
 * Do a synchronous pool op.
 */
int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
			u32 pool, u64 snapid,
			char *buf, int len)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_poolop *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	req->buf_len = len;
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
				  true);
	if (!req->reply)
		goto out;

	/* fill out request */
	req->request->hdr.version = cpu_to_le16(2);
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;
	h->pool = cpu_to_le32(pool);
	h->op = cpu_to_le32(op);
	h->auid = 0;
	h->snapid = cpu_to_le64(snapid);
	h->name_len = 0;

	err = do_generic_request(monc, req);

out:
	kref_put(&req->kref, release_generic_request);
	return err;
}

int ceph_monc_create_snapid(struct ceph_mon_client *monc,
			    u32 pool, u64 *snapid)
{
	return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
				   pool, 0, (char *)snapid, sizeof(*snapid));
}
EXPORT_SYMBOL(ceph_monc_create_snapid);

int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
			    u32 pool, u64 snapid)
{
	return ceph_monc_do_poolop(monc, POOL_OP_DELETE_UNMANAGED_SNAP,
				   pool, snapid, NULL, 0);
}

/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_con_revoke(monc->con, req->request);
		ceph_con_send(monc->con, ceph_msg_get(req->request));
	}
}

/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc);  /* continue hunting */
	} else {
		ceph_con_keepalive(monc->con);

		__validate_auth(monc);

		if (monc->auth->ops->is_authenticated(monc->auth))
			__send_subscribe(monc);
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	monc->have_fsid = false;
	return 0;
}

int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* connection */
	monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
	if (!monc->con) {
		err = -ENOMEM;
		goto out_monmap;
	}
	ceph_con_init(monc->client->msgr, monc->con);
	monc->con->private = monc;
	monc->con->ops = &mon_con_ops;

	/* authentication */
	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_con;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_auth;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	monc->cur_mon = -1;
	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_sent = 0;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->num_generic_requests = 0;
	monc->last_tid = 0;

	monc->have_mdsmap = 0;
	monc->have_osdmap = 0;
	monc->want_next_osdmap = 1;
	return 0;
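
/* error paths: unwind in the reverse order of the setup above */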
out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_auth:
	ceph_auth_destroy(monc->auth);
out_con:
	monc->con->ops->put(monc->con);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
EXPORT_SYMBOL(ceph_monc_init);

void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);

	monc->con->private = NULL;
	monc->con->ops->put(monc->con);
	monc->con = NULL;

	mutex_unlock(&monc->mutex);

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);
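
/*
 * Auth replies fall into three cases, per the ceph_handle_auth_reply()
 * call below: ret < 0 is a hard failure surfaced via client->auth_err,
 * ret > 0 means another handshake round trip is needed, and ret == 0
 * with a now-authenticated handle lets us start the session.
 */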
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	if (monc->auth->ops)
		was_auth = monc->auth->ops->is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_max);
	if (ret < 0) {
		monc->client->auth_err = ret;
		wake_up_all(&monc->client->auth_wq);
	} else if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
	} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr->inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);
	}
	mutex_unlock(&monc->mutex);
}

static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_max);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_POOLOP_REPLY:
		handle_poolop_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}
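
/*
 * Note (summarizing mon_alloc_msg() below): subscribe-ack and auth
 * replies reuse preallocated messages, generic replies are matched to
 * their request by tid, and map messages are allocated on demand.
 */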

/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_POOLOP_REPLY:
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	}
	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	if (!con->private)
		goto out;

	if (!monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			ceph_pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = ceph_con_get,
	.put = ceph_con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};