#include "ceph_debug.h"

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/socket.h>
#include <linux/string.h>

#include "messenger.h"
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
const char *ceph_name_type_str(int t)
{
	switch (t) {
	case CEPH_ENTITY_TYPE_MON: return "mon";
	case CEPH_ENTITY_TYPE_MDS: return "mds";
	case CEPH_ENTITY_TYPE_OSD: return "osd";
	case CEPH_ENTITY_TYPE_CLIENT: return "client";
	case CEPH_ENTITY_TYPE_ADMIN: return "admin";
	default: return "???";
	}
}
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
static char addr_str[MAX_ADDR_STR][40];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
const char *pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (void *)ss;
	unsigned char *quad = (void *)&in4->sin_addr.s_addr;
	struct sockaddr_in6 *in6 = (void *)ss;

	spin_lock(&addr_str_lock);
	i = last_addr_str++;
	if (last_addr_str == MAX_ADDR_STR)
		last_addr_str = 0;
	spin_unlock(&addr_str_lock);
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		sprintf(s, "%u.%u.%u.%u:%u",
			(unsigned int)quad[0],
			(unsigned int)quad[1],
			(unsigned int)quad[2],
			(unsigned int)quad[3],
			(unsigned int)ntohs(in4->sin_port));
		break;

	case AF_INET6:
		sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
			in6->sin6_addr.s6_addr16[0],
			in6->sin6_addr.s6_addr16[1],
			in6->sin6_addr.s6_addr16[2],
			in6->sin6_addr.s6_addr16[3],
			in6->sin6_addr.s6_addr16[4],
			in6->sin6_addr.s6_addr16[5],
			in6->sin6_addr.s6_addr16[6],
			in6->sin6_addr.s6_addr16[7],
			(unsigned int)ntohs(in6->sin6_port));
		break;

	default:
		sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
	}

	return s;
}
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int __init ceph_msgr_init(void)
{
	ceph_msgr_wq = create_workqueue("ceph-msgr");
	if (IS_ERR(ceph_msgr_wq)) {
		int ret = PTR_ERR(ceph_msgr_wq);
		pr_err("msgr_init failed to create workqueue: %d\n", ret);
		ceph_msgr_wq = NULL;
		return ret;
	}
	return 0;
}

void ceph_msgr_exit(void)
{
	destroy_workqueue(ceph_msgr_wq);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}
/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}

	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	sock->sk->sk_allocation = GFP_NOFS;

	set_sock_callbacks(sock, con);

	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
		ret = 0;
	}
	if (ret < 0) {
		pr_err("connect %s error %d\n",
		       pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";
		return ERR_PTR(ret);
	}
	return sock;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);       /* in case there's queued work */
	clear_bit(STANDBY, &con->state);    /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);    /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
}
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
}
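/*
 * Illustrative use of the connection API (the real callers live in the
 * mon/osd/mds client code and may differ in detail):
 *
 *	ceph_con_init(msgr, con);
 *	con->ops = &my_con_ops;		-- hypothetical ops table
 *	ceph_con_open(con, &peer_addr);
 *	ceph_con_send(con, msg);
 *	...
 *	ceph_con_close(con);
 */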
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);
		kfree(con);
	}
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
			     struct ceph_msg, list_head);
	con->out_msg = m;
	if (test_bit(LOSSYTX, &con->state)) {
		list_del_init(&m->list_head);
	} else {
		/* put message on sent list */
		ceph_msg_get(m);
		list_move_tail(&m->list_head, &con->out_sent);
	}

	m->hdr.seq = cpu_to_le64(++con->out_seq);

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				   sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u data_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		con->out_msg_pos.page_pos =
			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}
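/*
 * Prepare an ack for the most recently received message sequence.
 */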
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con->out_kvec[0].iov_base = &tag_keepalive;
	con->out_kvec[0].iov_len = 1;
	con->out_kvec_left = 1;
	con->out_kvec_bytes = 1;
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */

static void prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf;
	int auth_len = 0;
	int auth_protocol = 0;

	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
	con->out_kvec_left++;
	con->out_kvec_bytes += auth_len;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
static void prepare_write_connect(struct ceph_messenger *msgr,
				  struct ceph_connection *con,
				  int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = CEPH_FEATURE_SUPPORTED;
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);

	prepare_connect_authorizer(con);
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for partial or full iov entries consumed */
		if (ret >= con->out_kvec_cur->iov_len) {
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		} else {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = !con->msgr->nocrc;	/* crc the data payload unless disabled */
	int ret;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

	while (con->out_msg_pos.page < con->out_msg->nr_pages) {
		struct page *page = NULL;
		void *kaddr = NULL;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */
		if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
			  (int)(data_len - con->out_msg_pos.data_pos));
		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}

		ret = kernel_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos, len,
				      MSG_DONTWAIT | MSG_NOSIGNAL |
				      MSG_MORE);

		if (crc && (msg->pages || msg->pagelist))
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
			if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}
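/*
 * Write out zeros (from the messenger's zero page) in place of message
 * bytes that were revoked after we had already started writing them.
 */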
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect_retry(struct ceph_connection *con)
{
	dout("prepare_read_connect_retry %p\n", con);
	con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
		+ sizeof(con->peer_addr_for_me);
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
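/*
 * Read up to @size bytes of @object, advancing the running offset *to and
 * tracking progress in con->in_base_pos.
 */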
static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
			   &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
			   &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;
out:
	return ret;
}
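/*
 * Read all or part of the connect reply (and any authorizer reply) from
 * the peer.
 */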
static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}
static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}
static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}
static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
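/*
 * For illustration: a list such as "1.2.3.4:6789,10.1.2.3" parses into two
 * entity addrs; the second entry, having no explicit port, gets the default
 * CEPH_MON_PORT.
 */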
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		struct sockaddr_in *in4 = (void *)ss;
		struct sockaddr_in6 *in6 = (void *)ss;
		int port;

		memset(ss, 0, sizeof(*ss));
		if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
			     ',', &ipend)) {
			ss->ss_family = AF_INET;
		} else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
				    ',', &ipend)) {
			ss->ss_family = AF_INET6;
		} else {
			goto bad;
		}
		p = ipend;

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;
	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%s'\n", c);
	return -EINVAL;
}
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%lld, got %s/%lld\n",
			   pr_addr(&con->peer_addr.in_addr),
			   le64_to_cpu(con->peer_addr.nonce),
			   pr_addr(&con->actual_peer_addr.in_addr),
			   le64_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */

	mutex_unlock(&con->mutex);
	if (con->ops->bad_proto)
		con->ops->bad_proto(con);
	mutex_lock(&con->mutex);
}
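/*
 * Handle the peer's reply to our connect attempt.  Returns a negative value
 * on a fatal protocol/feature mismatch, 0 otherwise.
 */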
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = CEPH_FEATURE_SUPPORTED;
	u64 req_feat = CEPH_FEATURE_REQUIRED;
	u64 server_feat = le64_to_cpu(con->in_reply.features);

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			reset_connection(con);
			set_bit(CLOSED, &con->state);
			return -1;
		}
		con->auth_retry = 1;
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect_retry(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect peer connecting WAIT\n");

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
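/*
 * Read (part of) a message section (front or middle), updating its crc once
 * the whole section has arrived.
 */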
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section, unsigned int sec_len,
					u32 *crc)
{
	int left;
	int ret;

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
		if (section->iov_len == sec_len)
			*crc = crc32c(0, section->iov_base,
				      section->iov_len);
	}

	return 1;
}

static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip);
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	void *p;
	int ret;
	int to, left;
	unsigned front_len, middle_len, data_len, data_off;
	int datacrc = !con->msgr->nocrc;	/* checksum data unless disabled */
	int skip;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	while (con->in_base_pos < sizeof(con->in_hdr)) {
		left = sizeof(con->in_hdr) - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)&con->in_hdr + con->in_base_pos,
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
		if (con->in_base_pos == sizeof(con->in_hdr)) {
			u32 crc = crc32c(0, (void *)&con->in_hdr,
				 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
			if (crc != le32_to_cpu(con->in_hdr.crc)) {
				pr_err("read_partial_message bad hdr "
				       " crc %u != expected %u\n",
				       crc, con->in_hdr.crc);
				return -EBADMSG;
			}
		}
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_off = le16_to_cpu(con->in_hdr.data_off);

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg returned NULL, skipping message\n");
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			return 0;
		}
		if (IS_ERR(con->in_msg)) {
			ret = PTR_ERR(con->in_msg);
			con->in_msg = NULL;
			con->error_msg =
				"error allocating memory for incoming message";
			return ret;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
		con->in_msg_pos.data_pos = 0;
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec, middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		left = min((int)(data_len - con->in_msg_pos.data_pos),
			   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
		BUG_ON(m->pages == NULL);
		p = kmap(m->pages[con->in_msg_pos.page]);
		ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
				       left);
		if (ret > 0 && datacrc)
			con->in_data_crc =
				crc32c(con->in_data_crc,
				       p + con->in_msg_pos.page_pos, ret);
		kunmap(m->pages[con->in_msg_pos.page]);
		if (ret <= 0)
			return ret;
		con->in_msg_pos.data_pos += ret;
		con->in_msg_pos.page_pos += ret;
		if (con->in_msg_pos.page_pos == PAGE_SIZE) {
			con->in_msg_pos.page_pos = 0;
			con->in_msg_pos.page++;
		}
	}

	/* footer */
	to = sizeof(m->hdr) + sizeof(m->footer);
	while (con->in_base_pos < to) {
		left = to - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
				       (con->in_base_pos - sizeof(m->hdr)),
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src.name;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src.name),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	struct ceph_messenger *msgr = con->msgr;
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

	mutex_lock(&con->mutex);
more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		/*
		 * if we were STANDBY and are reconnecting _this_
		 * connection, bump connect_seq now.  Always bump
		 * global_seq.
		 */
		if (test_and_clear_bit(STANDBY, &con->state))
			con->connect_seq++;

		prepare_write_banner(msgr, con);
		prepare_write_connect(msgr, con, 1);
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		con->sock = ceph_tcp_connect(con);
		if (IS_ERR(con->sock)) {
			con->sock = NULL;
			con->error_msg = "connect error";
			ret = -1;
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_skip err %d\n", ret);
			goto done;
		}
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto done;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto done;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
done:
	ret = 0;
out:
	mutex_unlock(&con->mutex);
	dout("try_write done on %p\n", con);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	struct ceph_messenger *msgr;
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);
	msgr = con->msgr;

	mutex_lock(&con->mutex);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);
	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto done;
			if (process_banner(con) < 0) {
				ret = -1;
				goto out;
			}
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto done;
		if (process_connect(con) < 0) {
			ret = -1;
			goto out;
		}
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[1024];
		int skip = min(1024, -con->in_base_pos);
		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto done;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto done;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto done;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				goto out;
			case -EIO:
				con->error_msg = "io error";
				goto out;
			default:
				goto done;
			}
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto done;
		process_ack(con);
		goto more;
	}

done:
	ret = 0;
out:
	mutex_unlock(&con->mutex);
	dout("try_read done on %p\n", con);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 *
 * There is some trickery going on with QUEUED and BUSY because we
 * only want a _single_ thread operating on each connection at any
 * point in time, but we want to use all available CPUs.
 *
 * The worker thread only proceeds if it can atomically set BUSY.  It
 * clears QUEUED and does its thing.  When it thinks it's done, it
 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
 * (tries again to set BUSY).
 *
 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
 * try to queue work.  If that fails (work is already queued, or BUSY)
 * we give up (work also already being done or is queued) but leave QUEUED
 * set so that the worker thread will loop if necessary.
 */
static void queue_con(struct ceph_connection *con)
{
	if (test_bit(DEAD, &con->state)) {
		dout("queue_con %p ignoring: DEAD\n",
		     con);
		return;
	}

	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	set_bit(QUEUED, &con->state);
	if (test_bit(BUSY, &con->state)) {
		dout("queue_con %p - already BUSY\n", con);
		con->ops->put(con);
	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int backoff = 0;

more:
	if (test_and_set_bit(BUSY, &con->state) != 0) {
		dout("con_work %p BUSY already set\n", con);
		goto out;
	}
	dout("con_work %p start, clearing QUEUED\n", con);
	clear_bit(QUEUED, &con->state);

	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
	    try_read(con) < 0 ||
	    try_write(con) < 0) {
		backoff = 1;
		ceph_fault(con);     /* error/fault path */
	}

done:
	clear_bit(BUSY, &con->state);
	dout("con->state=%lu\n", con->state);
	if (test_bit(QUEUED, &con->state)) {
		if (!backoff || test_bit(OPENING, &con->state)) {
			dout("con_work %p QUEUED reset, looping\n", con);
			goto more;
		}
		dout("con_work %p QUEUED reset, but just faulted\n", con);
		clear_bit(QUEUED, &con->state);
	}
	dout("con_work %p done\n", con);

out:
	con->ops->put(con);
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	clear_bit(BUSY, &con->state);  /* to avoid an improbable race */

	mutex_lock(&con->mutex);
	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages in the queue, place the connection
	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
		dout("fault setting STANDBY\n");
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		dout("fault queueing %p delay %lu\n", con, con->delay);
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay)) == 0)
			con->ops->put(con);
	}

out_unlock:
	mutex_unlock(&con->mutex);
out:
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}
void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	msg->hdr.src.name = con->msgr->inst.name;
	msg->hdr.src.addr = con->msgr->my_enc_addr;
	msg->hdr.orig_src = msg->hdr.src;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		if (con->out_msg == msg) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;
		}
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
	} else {
		dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
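/*
 * For illustration (not an actual caller): a message with a 128-byte front
 * and no data pages would be allocated as
 *
 *	msg = ceph_msg_new(type, 128, 0, 0, NULL);
 */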
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len,
			      int page_len, int page_off, struct page **pages)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), GFP_NOFS);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.type = cpu_to_le16(type);
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = cpu_to_le32(page_len);
	m->hdr.data_off = cpu_to_le16(page_off);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, GFP_NOFS,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, GFP_NOFS);
		}
		if (m->front.iov_base == NULL) {
			pr_err("msg_new can't allocate %d bytes\n",
			       front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = calc_pages_for(page_off, page_len);
	m->pages = pages;
	m->pagelist = NULL;

	dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
	     m->nr_pages);
	return m;

out2:
	ceph_msg_put(m);
out:
	pr_err("msg_new can't create type %d len %d\n", type, front_len);
	return ERR_PTR(-ENOMEM);
}
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (IS_ERR(msg))
			return msg;
		if (*skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, 0, 0, NULL);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return ERR_PTR(-ENOMEM);
		}
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len) {
		ret = ceph_alloc_middle(con, msg);
		if (ret < 0) {
			ceph_msg_put(msg);
			return ERR_PTR(ret);
		}
	}

	return msg;
}
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}
2195 void ceph_msg_last_put(struct kref
*kref
)
2197 struct ceph_msg
*m
= container_of(kref
, struct ceph_msg
, kref
);
2199 dout("ceph_msg_put last one on %p\n", m
);
2200 WARN_ON(!list_empty(&m
->list_head
));
2202 /* drop middle, data, if any */
2204 ceph_buffer_put(m
->middle
);
2211 ceph_pagelist_release(m
->pagelist
);
2217 ceph_msgpool_put(m
->pool
, m
);
2222 void ceph_msg_dump(struct ceph_msg
*msg
)
2224 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg
,
2225 msg
->front_max
, msg
->nr_pages
);
2226 print_hex_dump(KERN_DEBUG
, "header: ",
2227 DUMP_PREFIX_OFFSET
, 16, 1,
2228 &msg
->hdr
, sizeof(msg
->hdr
), true);
2229 print_hex_dump(KERN_DEBUG
, " front: ",
2230 DUMP_PREFIX_OFFSET
, 16, 1,
2231 msg
->front
.iov_base
, msg
->front
.iov_len
, true);
2233 print_hex_dump(KERN_DEBUG
, "middle: ",
2234 DUMP_PREFIX_OFFSET
, 16, 1,
2235 msg
->middle
->vec
.iov_base
,
2236 msg
->middle
->vec
.iov_len
, true);
2237 print_hex_dump(KERN_DEBUG
, "footer: ",
2238 DUMP_PREFIX_OFFSET
, 16, 1,
2239 &msg
->footer
, sizeof(msg
->footer
), true);