net/ceph/messenger.c
1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
8 #include <linux/net.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #include <linux/bio.h>
13 #include <linux/blkdev.h>
14 #include <net/tcp.h>
16 #include <linux/ceph/libceph.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
22 * Ceph uses the messenger to exchange ceph_msg messages with other
23 * hosts in the system. The messenger provides ordered and reliable
24 * delivery. We tolerate TCP disconnects by reconnecting (with
25 * exponential backoff) in the case of a fault (disconnection, bad
26 * crc, protocol error). Acks allow sent messages to be discarded by
27 * the sender.
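 *
 * Rough control flow, as implemented below: ceph_con_send() queues a
 * message on a connection and calls queue_con(); con_work() runs from
 * the ceph-msgr workqueue and drives try_read()/try_write(); on a
 * fault, ceph_fault() requeues unacked messages and schedules a
 * reconnect with exponential backoff.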
30 /* static tag bytes (protocol control messages) */
31 static char tag_msg = CEPH_MSGR_TAG_MSG;
32 static char tag_ack = CEPH_MSGR_TAG_ACK;
33 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
35 #ifdef CONFIG_LOCKDEP
36 static struct lock_class_key socket_class;
37 #endif
40 static void queue_con(struct ceph_connection *con);
41 static void con_work(struct work_struct *);
42 static void ceph_fault(struct ceph_connection *con);
45 * nicely render a sockaddr as a string.
47 #define MAX_ADDR_STR 20
48 #define MAX_ADDR_STR_LEN 60
49 static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
50 static DEFINE_SPINLOCK(addr_str_lock);
51 static int last_addr_str;
53 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
55 int i;
56 char *s;
57 struct sockaddr_in *in4 = (void *)ss;
58 struct sockaddr_in6 *in6 = (void *)ss;
60 spin_lock(&addr_str_lock);
61 i = last_addr_str++;
62 if (last_addr_str == MAX_ADDR_STR)
63 last_addr_str = 0;
64 spin_unlock(&addr_str_lock);
65 s = addr_str[i];
67 switch (ss->ss_family) {
68 case AF_INET:
69 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
70 (unsigned int)ntohs(in4->sin_port));
71 break;
73 case AF_INET6:
74 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
75 (unsigned int)ntohs(in6->sin6_port));
76 break;
78 default:
79 sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
82 return s;
84 EXPORT_SYMBOL(ceph_pr_addr);
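/*
 * Usage sketch (illustrative, not part of the original file): the
 * returned string lives in a small rotating pool of static buffers,
 * so it is meant for immediate consumption by printk-style logging,
 * not for long-term storage.
 */
static void example_log_peer(struct ceph_connection *con)
{
	pr_info("peer is %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
}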
86 static void encode_my_addr(struct ceph_messenger *msgr)
88 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
89 ceph_encode_addr(&msgr->my_enc_addr);
93 * work queue for all reading and writing to/from the socket.
95 struct workqueue_struct *ceph_msgr_wq;
97 int ceph_msgr_init(void)
99 ceph_msgr_wq = create_workqueue("ceph-msgr");
100 if (!ceph_msgr_wq) {
101 pr_err("msgr_init failed to create workqueue\n");
102 return -ENOMEM;
106 return 0;
108 EXPORT_SYMBOL(ceph_msgr_init);
110 void ceph_msgr_exit(void)
112 destroy_workqueue(ceph_msgr_wq);
114 EXPORT_SYMBOL(ceph_msgr_exit);
116 void ceph_msgr_flush(void)
118 flush_workqueue(ceph_msgr_wq);
120 EXPORT_SYMBOL(ceph_msgr_flush);
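/*
 * Minimal pairing sketch (hypothetical caller, not part of the
 * original file): a messenger user initializes the workqueue once at
 * module load and flushes it before tearing it down.
 */
static int __init example_init(void)
{
	return ceph_msgr_init();
}

static void __exit example_exit(void)
{
	ceph_msgr_flush();	/* drain any queued con_work */
	ceph_msgr_exit();
}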
124 * socket callback functions
127 /* data available on socket, or listen socket received a connect */
128 static void ceph_data_ready(struct sock *sk, int count_unused)
130 struct ceph_connection *con =
131 (struct ceph_connection *)sk->sk_user_data;
132 if (sk->sk_state != TCP_CLOSE_WAIT) {
133 dout("ceph_data_ready on %p state = %lu, queueing work\n",
134 con, con->state);
135 queue_con(con);
139 /* socket has buffer space for writing */
140 static void ceph_write_space(struct sock *sk)
142 struct ceph_connection *con =
143 (struct ceph_connection *)sk->sk_user_data;
145 /* only queue to workqueue if there is data we want to write. */
146 if (test_bit(WRITE_PENDING, &con->state)) {
147 dout("ceph_write_space %p queueing write work\n", con);
148 queue_con(con);
149 } else {
150 dout("ceph_write_space %p nothing to write\n", con);
153 /* since we have our own write_space, clear the SOCK_NOSPACE flag */
154 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
157 /* socket's state has changed */
158 static void ceph_state_change(struct sock *sk)
160 struct ceph_connection *con =
161 (struct ceph_connection *)sk->sk_user_data;
163 dout("ceph_state_change %p state = %lu sk_state = %u\n",
164 con, con->state, sk->sk_state);
166 if (test_bit(CLOSED, &con->state))
167 return;
169 switch (sk->sk_state) {
170 case TCP_CLOSE:
171 dout("ceph_state_change TCP_CLOSE\n");
172 case TCP_CLOSE_WAIT:
173 dout("ceph_state_change TCP_CLOSE_WAIT\n");
174 if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
175 if (test_bit(CONNECTING, &con->state))
176 con->error_msg = "connection failed";
177 else
178 con->error_msg = "socket closed";
179 queue_con(con);
181 break;
182 case TCP_ESTABLISHED:
183 dout("ceph_state_change TCP_ESTABLISHED\n");
184 queue_con(con);
185 break;
190 * set up socket callbacks
192 static void set_sock_callbacks(struct socket *sock,
193 struct ceph_connection *con)
195 struct sock *sk = sock->sk;
196 sk->sk_user_data = (void *)con;
197 sk->sk_data_ready = ceph_data_ready;
198 sk->sk_write_space = ceph_write_space;
199 sk->sk_state_change = ceph_state_change;
204 * socket helpers
208 * initiate connection to a remote socket.
210 static struct socket *ceph_tcp_connect(struct ceph_connection *con)
212 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
213 struct socket *sock;
214 int ret;
216 BUG_ON(con->sock);
217 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
218 IPPROTO_TCP, &sock);
219 if (ret)
220 return ERR_PTR(ret);
221 con->sock = sock;
222 sock->sk->sk_allocation = GFP_NOFS;
224 #ifdef CONFIG_LOCKDEP
225 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
226 #endif
228 set_sock_callbacks(sock, con);
230 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
232 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
233 O_NONBLOCK);
234 if (ret == -EINPROGRESS) {
235 dout("connect %s EINPROGRESS sk_state = %u\n",
236 ceph_pr_addr(&con->peer_addr.in_addr),
237 sock->sk->sk_state);
238 ret = 0;
240 if (ret < 0) {
241 pr_err("connect %s error %d\n",
242 ceph_pr_addr(&con->peer_addr.in_addr), ret);
243 sock_release(sock);
244 con->sock = NULL;
245 con->error_msg = "connect error";
248 if (ret < 0)
249 return ERR_PTR(ret);
250 return sock;
253 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
255 struct kvec iov = {buf, len};
256 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
258 return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
262 * write something. @more is true if caller will be sending more data
263 * shortly.
265 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
266 size_t kvlen, size_t len, int more)
268 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
270 if (more)
271 msg.msg_flags |= MSG_MORE;
272 else
273 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
275 return kernel_sendmsg(sock, &msg, iov, kvlen, len);
280 * Shutdown/close the socket for the given connection.
282 static int con_close_socket(struct ceph_connection *con)
284 int rc;
286 dout("con_close_socket on %p sock %p\n", con, con->sock);
287 if (!con->sock)
288 return 0;
289 set_bit(SOCK_CLOSED, &con->state);
290 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
291 sock_release(con->sock);
292 con->sock = NULL;
293 clear_bit(SOCK_CLOSED, &con->state);
294 return rc;
298 * Reset a connection. Discard all incoming and outgoing messages
299 * and clear *_seq state.
301 static void ceph_msg_remove(struct ceph_msg *msg)
303 list_del_init(&msg->list_head);
304 ceph_msg_put(msg);
306 static void ceph_msg_remove_list(struct list_head *head)
308 while (!list_empty(head)) {
309 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
310 list_head);
311 ceph_msg_remove(msg);
315 static void reset_connection(struct ceph_connection *con)
317 /* reset connection, out_queue, msg_ and connect_seq */
318 /* discard existing out_queue and msg_seq */
319 ceph_msg_remove_list(&con->out_queue);
320 ceph_msg_remove_list(&con->out_sent);
322 if (con->in_msg) {
323 ceph_msg_put(con->in_msg);
324 con->in_msg = NULL;
327 con->connect_seq = 0;
328 con->out_seq = 0;
329 if (con->out_msg) {
330 ceph_msg_put(con->out_msg);
331 con->out_msg = NULL;
333 con->out_keepalive_pending = false;
334 con->in_seq = 0;
335 con->in_seq_acked = 0;
339 * mark a peer down. drop any open connections.
341 void ceph_con_close(struct ceph_connection *con)
343 dout("con_close %p peer %s\n", con,
344 ceph_pr_addr(&con->peer_addr.in_addr));
345 set_bit(CLOSED, &con->state); /* in case there's queued work */
346 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
347 clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
348 clear_bit(KEEPALIVE_PENDING, &con->state);
349 clear_bit(WRITE_PENDING, &con->state);
350 mutex_lock(&con->mutex);
351 reset_connection(con);
352 con->peer_global_seq = 0;
353 cancel_delayed_work(&con->work);
354 mutex_unlock(&con->mutex);
355 queue_con(con);
357 EXPORT_SYMBOL(ceph_con_close);
360 * Reopen a closed connection, with a new peer address.
362 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
364 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
365 set_bit(OPENING, &con->state);
366 clear_bit(CLOSED, &con->state);
367 memcpy(&con->peer_addr, addr, sizeof(*addr));
368 con->delay = 0; /* reset backoff memory */
369 queue_con(con);
371 EXPORT_SYMBOL(ceph_con_open);
374 * return true if this connection ever successfully opened
376 bool ceph_con_opened(struct ceph_connection *con)
378 return con->connect_seq > 0;
382 * generic get/put
384 struct ceph_connection *ceph_con_get(struct ceph_connection *con)
386 dout("con_get %p nref = %d -> %d\n", con,
387 atomic_read(&con->nref), atomic_read(&con->nref) + 1);
388 if (atomic_inc_not_zero(&con->nref))
389 return con;
390 return NULL;
393 void ceph_con_put(struct ceph_connection *con)
395 dout("con_put %p nref = %d -> %d\n", con,
396 atomic_read(&con->nref), atomic_read(&con->nref) - 1);
397 BUG_ON(atomic_read(&con->nref) == 0);
398 if (atomic_dec_and_test(&con->nref)) {
399 BUG_ON(con->sock);
400 kfree(con);
405 * initialize a new connection.
407 void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
409 dout("con_init %p\n", con);
410 memset(con, 0, sizeof(*con));
411 atomic_set(&con->nref, 1);
412 con->msgr = msgr;
413 mutex_init(&con->mutex);
414 INIT_LIST_HEAD(&con->out_queue);
415 INIT_LIST_HEAD(&con->out_sent);
416 INIT_DELAYED_WORK(&con->work, con_work);
418 EXPORT_SYMBOL(ceph_con_init);
422 * We maintain a global counter to order connection attempts. Get
423 * a unique seq greater than @gt.
425 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
427 u32 ret;
429 spin_lock(&msgr->global_seq_lock);
430 if (msgr->global_seq < gt)
431 msgr->global_seq = gt;
432 ret = ++msgr->global_seq;
433 spin_unlock(&msgr->global_seq_lock);
434 return ret;
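/*
 * For example, process_connect() below calls get_global_seq(msgr,
 * peer_gseq) when the peer answers CEPH_MSGR_TAG_RETRY_GLOBAL, so the
 * next connect attempt uses a global_seq strictly greater than the
 * peer's.
 */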
439 * Prepare footer for currently outgoing message, and finish things
440 * off. Assumes out_kvec* are already valid; we just add on to the end.
442 static void prepare_write_message_footer(struct ceph_connection *con, int v)
444 struct ceph_msg *m = con->out_msg;
446 dout("prepare_write_message_footer %p\n", con);
447 con->out_kvec_is_msg = true;
448 con->out_kvec[v].iov_base = &m->footer;
449 con->out_kvec[v].iov_len = sizeof(m->footer);
450 con->out_kvec_bytes += sizeof(m->footer);
451 con->out_kvec_left++;
452 con->out_more = m->more_to_follow;
453 con->out_msg_done = true;
457 * Prepare headers for the next outgoing message.
459 static void prepare_write_message(struct ceph_connection *con)
461 struct ceph_msg *m;
462 int v = 0;
464 con->out_kvec_bytes = 0;
465 con->out_kvec_is_msg = true;
466 con->out_msg_done = false;
468 /* Sneak an ack in there first? If we can get it into the same
469 * TCP packet that's a good thing. */
470 if (con->in_seq > con->in_seq_acked) {
471 con->in_seq_acked = con->in_seq;
472 con->out_kvec[v].iov_base = &tag_ack;
473 con->out_kvec[v++].iov_len = 1;
474 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
475 con->out_kvec[v].iov_base = &con->out_temp_ack;
476 con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
477 con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
480 m = list_first_entry(&con->out_queue,
481 struct ceph_msg, list_head);
482 con->out_msg = m;
483 if (test_bit(LOSSYTX, &con->state)) {
484 list_del_init(&m->list_head);
485 } else {
486 /* put message on sent list */
487 ceph_msg_get(m);
488 list_move_tail(&m->list_head, &con->out_sent);
492 * only assign outgoing seq # if we haven't sent this message
493 * yet. if it is requeued, resend with its original seq.
495 if (m->needs_out_seq) {
496 m->hdr.seq = cpu_to_le64(++con->out_seq);
497 m->needs_out_seq = false;
500 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
501 m, con->out_seq, le16_to_cpu(m->hdr.type),
502 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
503 le32_to_cpu(m->hdr.data_len),
504 m->nr_pages);
505 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
507 /* tag + hdr + front + middle */
508 con->out_kvec[v].iov_base = &tag_msg;
509 con->out_kvec[v++].iov_len = 1;
510 con->out_kvec[v].iov_base = &m->hdr;
511 con->out_kvec[v++].iov_len = sizeof(m->hdr);
512 con->out_kvec[v++] = m->front;
513 if (m->middle)
514 con->out_kvec[v++] = m->middle->vec;
515 con->out_kvec_left = v;
516 con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
517 (m->middle ? m->middle->vec.iov_len : 0);
518 con->out_kvec_cur = con->out_kvec;
520 /* fill in crc (except data pages), footer */
521 con->out_msg->hdr.crc =
522 cpu_to_le32(crc32c(0, (void *)&m->hdr,
523 sizeof(m->hdr) - sizeof(m->hdr.crc)));
524 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
525 con->out_msg->footer.front_crc =
526 cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
527 if (m->middle)
528 con->out_msg->footer.middle_crc =
529 cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
530 m->middle->vec.iov_len));
531 else
532 con->out_msg->footer.middle_crc = 0;
533 con->out_msg->footer.data_crc = 0;
534 dout("prepare_write_message front_crc %u data_crc %u\n",
535 le32_to_cpu(con->out_msg->footer.front_crc),
536 le32_to_cpu(con->out_msg->footer.middle_crc));
538 /* is there a data payload? */
539 if (le32_to_cpu(m->hdr.data_len) > 0) {
540 /* initialize page iterator */
541 con->out_msg_pos.page = 0;
542 if (m->pages)
543 con->out_msg_pos.page_pos =
544 le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
545 else
546 con->out_msg_pos.page_pos = 0;
547 con->out_msg_pos.data_pos = 0;
548 con->out_msg_pos.did_page_crc = 0;
549 con->out_more = 1; /* data + footer will follow */
550 } else {
551 /* no, queue up footer too and be done */
552 prepare_write_message_footer(con, v);
555 set_bit(WRITE_PENDING, &con->state);
559 * Prepare an ack.
561 static void prepare_write_ack(struct ceph_connection *con)
563 dout("prepare_write_ack %p %llu -> %llu\n", con,
564 con->in_seq_acked, con->in_seq);
565 con->in_seq_acked = con->in_seq;
567 con->out_kvec[0].iov_base = &tag_ack;
568 con->out_kvec[0].iov_len = 1;
569 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
570 con->out_kvec[1].iov_base = &con->out_temp_ack;
571 con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
572 con->out_kvec_left = 2;
573 con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
574 con->out_kvec_cur = con->out_kvec;
575 con->out_more = 1; /* more will follow.. eventually.. */
576 set_bit(WRITE_PENDING, &con->state);
580 * Prepare to write keepalive byte.
582 static void prepare_write_keepalive(struct ceph_connection *con)
584 dout("prepare_write_keepalive %p\n", con);
585 con->out_kvec[0].iov_base = &tag_keepalive;
586 con->out_kvec[0].iov_len = 1;
587 con->out_kvec_left = 1;
588 con->out_kvec_bytes = 1;
589 con->out_kvec_cur = con->out_kvec;
590 set_bit(WRITE_PENDING, &con->state);
594 * Connection negotiation.
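 *
 * The sequence, as driven by try_write()/try_read() below: each side
 * sends its banner (CEPH_BANNER plus its encoded address); the client
 * then sends a ceph_msg_connect (prepare_write_connect), optionally
 * followed by an authorizer payload, and the server answers with a
 * ceph_msg_connect_reply whose tag (READY, RETRY_*, RESETSESSION,
 * BADAUTHORIZER, ...) is handled in process_connect().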
597 static void prepare_connect_authorizer(struct ceph_connection *con)
599 void *auth_buf = NULL;
600 int auth_len = 0;
601 int auth_protocol = 0;
603 mutex_unlock(&con->mutex);
604 if (con->ops->get_authorizer)
605 con->ops->get_authorizer(con, &auth_buf, &auth_len,
606 &auth_protocol, &con->auth_reply_buf,
607 &con->auth_reply_buf_len,
608 con->auth_retry);
609 mutex_lock(&con->mutex);
611 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
612 con->out_connect.authorizer_len = cpu_to_le32(auth_len);
614 con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
615 con->out_kvec[con->out_kvec_left].iov_len = auth_len;
616 con->out_kvec_left++;
617 con->out_kvec_bytes += auth_len;
621 * We connected to a peer and are saying hello.
623 static void prepare_write_banner(struct ceph_messenger *msgr,
624 struct ceph_connection *con)
626 int len = strlen(CEPH_BANNER);
628 con->out_kvec[0].iov_base = CEPH_BANNER;
629 con->out_kvec[0].iov_len = len;
630 con->out_kvec[1].iov_base = &msgr->my_enc_addr;
631 con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
632 con->out_kvec_left = 2;
633 con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
634 con->out_kvec_cur = con->out_kvec;
635 con->out_more = 0;
636 set_bit(WRITE_PENDING, &con->state);
639 static void prepare_write_connect(struct ceph_messenger *msgr,
640 struct ceph_connection *con,
641 int after_banner)
643 unsigned global_seq = get_global_seq(con->msgr, 0);
644 int proto;
646 switch (con->peer_name.type) {
647 case CEPH_ENTITY_TYPE_MON:
648 proto = CEPH_MONC_PROTOCOL;
649 break;
650 case CEPH_ENTITY_TYPE_OSD:
651 proto = CEPH_OSDC_PROTOCOL;
652 break;
653 case CEPH_ENTITY_TYPE_MDS:
654 proto = CEPH_MDSC_PROTOCOL;
655 break;
656 default:
657 BUG();
660 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
661 con->connect_seq, global_seq, proto);
663 con->out_connect.features = cpu_to_le64(msgr->supported_features);
664 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
665 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
666 con->out_connect.global_seq = cpu_to_le32(global_seq);
667 con->out_connect.protocol_version = cpu_to_le32(proto);
668 con->out_connect.flags = 0;
670 if (!after_banner) {
671 con->out_kvec_left = 0;
672 con->out_kvec_bytes = 0;
674 con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
675 con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
676 con->out_kvec_left++;
677 con->out_kvec_bytes += sizeof(con->out_connect);
678 con->out_kvec_cur = con->out_kvec;
679 con->out_more = 0;
680 set_bit(WRITE_PENDING, &con->state);
682 prepare_connect_authorizer(con);
687 * write as much of pending kvecs to the socket as we can.
688 * 1 -> done
689 * 0 -> socket full, but more to do
690 * <0 -> error
692 static int write_partial_kvec(struct ceph_connection *con)
694 int ret;
696 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
697 while (con->out_kvec_bytes > 0) {
698 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
699 con->out_kvec_left, con->out_kvec_bytes,
700 con->out_more);
701 if (ret <= 0)
702 goto out;
703 con->out_kvec_bytes -= ret;
704 if (con->out_kvec_bytes == 0)
705 break; /* done */
706 while (ret > 0) {
707 if (ret >= con->out_kvec_cur->iov_len) {
708 ret -= con->out_kvec_cur->iov_len;
709 con->out_kvec_cur++;
710 con->out_kvec_left--;
711 } else {
712 con->out_kvec_cur->iov_len -= ret;
713 con->out_kvec_cur->iov_base += ret;
714 ret = 0;
715 break;
719 con->out_kvec_left = 0;
720 con->out_kvec_is_msg = false;
721 ret = 1;
722 out:
723 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
724 con->out_kvec_bytes, con->out_kvec_left, ret);
725 return ret; /* done! */
728 #ifdef CONFIG_BLOCK
729 static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
731 if (!bio) {
732 *iter = NULL;
733 *seg = 0;
734 return;
736 *iter = bio;
737 *seg = bio->bi_idx;
740 static void iter_bio_next(struct bio **bio_iter, int *seg)
742 if (*bio_iter == NULL)
743 return;
745 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
747 (*seg)++;
748 if (*seg == (*bio_iter)->bi_vcnt)
749 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
751 #endif
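#ifdef CONFIG_BLOCK
/*
 * Illustrative walk over a bio chain using the two helpers above (not
 * part of the original file; assumes each bio in the chain has at
 * least one segment, since iter_bio_next() BUG_ONs otherwise).
 */
static void example_walk_bio(struct bio *bio)
{
	struct bio *iter;
	int seg;

	for (init_bio_iter(bio, &iter, &seg); iter;
	     iter_bio_next(&iter, &seg)) {
		struct bio_vec *bv = bio_iovec_idx(iter, seg);

		pr_debug("seg: page %p off %u len %u\n",
			 bv->bv_page, bv->bv_offset, bv->bv_len);
	}
}
#endif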
754 * Write as much message data payload as we can. If we finish, queue
755 * up the footer.
756 * 1 -> done, footer is now queued in out_kvec[].
757 * 0 -> socket full, but more to do
758 * <0 -> error
760 static int write_partial_msg_pages(struct ceph_connection *con)
762 struct ceph_msg *msg = con->out_msg;
763 unsigned data_len = le32_to_cpu(msg->hdr.data_len);
764 size_t len;
765 int crc = !con->msgr->nocrc;
766 int ret;
767 int total_max_write;
768 int in_trail = 0;
769 size_t trail_len = (msg->trail ? msg->trail->length : 0);
771 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
772 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
773 con->out_msg_pos.page_pos);
775 #ifdef CONFIG_BLOCK
776 if (msg->bio && !msg->bio_iter)
777 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
778 #endif
780 while (data_len > con->out_msg_pos.data_pos) {
781 struct page *page = NULL;
782 void *kaddr = NULL;
783 int max_write = PAGE_SIZE;
784 int page_shift = 0;
786 total_max_write = data_len - trail_len -
787 con->out_msg_pos.data_pos;
790 * if we are calculating the data crc (the default), we need
791 * to map the page. if our pages[] has been revoked, use the
792 * zero page.
795 /* have we reached the trail part of the data? */
796 if (con->out_msg_pos.data_pos >= data_len - trail_len) {
797 in_trail = 1;
799 total_max_write = data_len - con->out_msg_pos.data_pos;
801 page = list_first_entry(&msg->trail->head,
802 struct page, lru);
803 if (crc)
804 kaddr = kmap(page);
805 max_write = PAGE_SIZE;
806 } else if (msg->pages) {
807 page = msg->pages[con->out_msg_pos.page];
808 if (crc)
809 kaddr = kmap(page);
810 } else if (msg->pagelist) {
811 page = list_first_entry(&msg->pagelist->head,
812 struct page, lru);
813 if (crc)
814 kaddr = kmap(page);
815 #ifdef CONFIG_BLOCK
816 } else if (msg->bio) {
817 struct bio_vec *bv;
819 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
820 page = bv->bv_page;
821 page_shift = bv->bv_offset;
822 if (crc)
823 kaddr = kmap(page) + page_shift;
824 max_write = bv->bv_len;
825 #endif
826 } else {
827 page = con->msgr->zero_page;
828 if (crc)
829 kaddr = page_address(con->msgr->zero_page);
831 len = min_t(int, max_write - con->out_msg_pos.page_pos,
832 total_max_write);
834 if (crc && !con->out_msg_pos.did_page_crc) {
835 void *base = kaddr + con->out_msg_pos.page_pos;
836 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
838 BUG_ON(kaddr == NULL);
839 con->out_msg->footer.data_crc =
840 cpu_to_le32(crc32c(tmpcrc, base, len));
841 con->out_msg_pos.did_page_crc = 1;
843 ret = kernel_sendpage(con->sock, page,
844 con->out_msg_pos.page_pos + page_shift,
845 len,
846 MSG_DONTWAIT | MSG_NOSIGNAL |
847 MSG_MORE);
849 if (crc &&
850 (msg->pages || msg->pagelist || msg->bio || in_trail))
851 kunmap(page);
853 if (ret <= 0)
854 goto out;
856 con->out_msg_pos.data_pos += ret;
857 con->out_msg_pos.page_pos += ret;
858 if (ret == len) {
859 con->out_msg_pos.page_pos = 0;
860 con->out_msg_pos.page++;
861 con->out_msg_pos.did_page_crc = 0;
862 if (in_trail)
863 list_move_tail(&page->lru,
864 &msg->trail->head);
865 else if (msg->pagelist)
866 list_move_tail(&page->lru,
867 &msg->pagelist->head);
868 #ifdef CONFIG_BLOCK
869 else if (msg->bio)
870 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
871 #endif
875 dout("write_partial_msg_pages %p msg %p done\n", con, msg);
877 /* prepare and queue up footer, too */
878 if (!crc)
879 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
880 con->out_kvec_bytes = 0;
881 con->out_kvec_left = 0;
882 con->out_kvec_cur = con->out_kvec;
883 prepare_write_message_footer(con, 0);
884 ret = 1;
885 out:
886 return ret;
890 * write some zeros
892 static int write_partial_skip(struct ceph_connection *con)
894 int ret;
896 while (con->out_skip > 0) {
897 struct kvec iov = {
898 .iov_base = page_address(con->msgr->zero_page),
899 .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
902 ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
903 if (ret <= 0)
904 goto out;
905 con->out_skip -= ret;
907 ret = 1;
908 out:
909 return ret;
913 * Prepare to read connection handshake, or an ack.
915 static void prepare_read_banner(struct ceph_connection *con)
917 dout("prepare_read_banner %p\n", con);
918 con->in_base_pos = 0;
921 static void prepare_read_connect(struct ceph_connection *con)
923 dout("prepare_read_connect %p\n", con);
924 con->in_base_pos = 0;
927 static void prepare_read_ack(struct ceph_connection *con)
929 dout("prepare_read_ack %p\n", con);
930 con->in_base_pos = 0;
933 static void prepare_read_tag(struct ceph_connection *con)
935 dout("prepare_read_tag %p\n", con);
936 con->in_base_pos = 0;
937 con->in_tag = CEPH_MSGR_TAG_READY;
941 * Prepare to read a message.
943 static int prepare_read_message(struct ceph_connection *con)
945 dout("prepare_read_message %p\n", con);
946 BUG_ON(con->in_msg != NULL);
947 con->in_base_pos = 0;
948 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
949 return 0;
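/*
 * read_partial() reads @size bytes into @object, tracking progress in
 * con->in_base_pos against the running offset *to.  It returns 1 once
 * the object is complete and passes through 0/negative returns from
 * the socket, so callers can simply be re-invoked when more data
 * arrives.
 */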
953 static int read_partial(struct ceph_connection *con,
954 int *to, int size, void *object)
956 *to += size;
957 while (con->in_base_pos < *to) {
958 int left = *to - con->in_base_pos;
959 int have = size - left;
960 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
961 if (ret <= 0)
962 return ret;
963 con->in_base_pos += ret;
965 return 1;
970 * Read all or part of the connect-side handshake on a new connection
972 static int read_partial_banner(struct ceph_connection *con)
974 int ret, to = 0;
976 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
978 /* peer's banner */
979 ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
980 if (ret <= 0)
981 goto out;
982 ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
983 &con->actual_peer_addr);
984 if (ret <= 0)
985 goto out;
986 ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
987 &con->peer_addr_for_me);
988 if (ret <= 0)
989 goto out;
990 out:
991 return ret;
994 static int read_partial_connect(struct ceph_connection *con)
996 int ret, to = 0;
998 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1000 ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
1001 if (ret <= 0)
1002 goto out;
1003 ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
1004 con->auth_reply_buf);
1005 if (ret <= 0)
1006 goto out;
1008 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1009 con, (int)con->in_reply.tag,
1010 le32_to_cpu(con->in_reply.connect_seq),
1011 le32_to_cpu(con->in_reply.global_seq));
1012 out:
1013 return ret;
1018 * Verify the hello banner looks okay.
1020 static int verify_hello(struct ceph_connection *con)
1022 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1023 pr_err("connect to %s got bad banner\n",
1024 ceph_pr_addr(&con->peer_addr.in_addr));
1025 con->error_msg = "protocol error, bad banner";
1026 return -1;
1028 return 0;
1031 static bool addr_is_blank(struct sockaddr_storage *ss)
1033 switch (ss->ss_family) {
1034 case AF_INET:
1035 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1036 case AF_INET6:
1037 return
1038 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1039 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1040 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1041 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1043 return false;
1046 static int addr_port(struct sockaddr_storage *ss)
1048 switch (ss->ss_family) {
1049 case AF_INET:
1050 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1051 case AF_INET6:
1052 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1054 return 0;
1057 static void addr_set_port(struct sockaddr_storage *ss, int p)
1059 switch (ss->ss_family) {
1060 case AF_INET:
1061 ((struct sockaddr_in *)ss)->sin_port = htons(p);
break;
1062 case AF_INET6:
1063 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
break;
1068 * Parse an ip[:port] list into an addr array. Use the default
1069 * monitor port if a port isn't specified.
1071 int ceph_parse_ips(const char *c, const char *end,
1072 struct ceph_entity_addr *addr,
1073 int max_count, int *count)
1075 int i;
1076 const char *p = c;
1078 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1079 for (i = 0; i < max_count; i++) {
1080 const char *ipend;
1081 struct sockaddr_storage *ss = &addr[i].in_addr;
1082 struct sockaddr_in *in4 = (void *)ss;
1083 struct sockaddr_in6 *in6 = (void *)ss;
1084 int port;
1085 char delim = ',';
1087 if (*p == '[') {
1088 delim = ']';
1089 p++;
1092 memset(ss, 0, sizeof(*ss));
1093 if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
1094 delim, &ipend))
1095 ss->ss_family = AF_INET;
1096 else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
1097 delim, &ipend))
1098 ss->ss_family = AF_INET6;
1099 else
1100 goto bad;
1101 p = ipend;
1103 if (delim == ']') {
1104 if (*p != ']') {
1105 dout("missing matching ']'\n");
1106 goto bad;
1108 p++;
1111 /* port? */
1112 if (p < end && *p == ':') {
1113 port = 0;
1114 p++;
1115 while (p < end && *p >= '0' && *p <= '9') {
1116 port = (port * 10) + (*p - '0');
1117 p++;
1119 if (port > 65535 || port == 0)
1120 goto bad;
1121 } else {
1122 port = CEPH_MON_PORT;
1125 addr_set_port(ss, port);
1127 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1129 if (p == end)
1130 break;
1131 if (*p != ',')
1132 goto bad;
1133 p++;
1136 if (p != end)
1137 goto bad;
1139 if (count)
1140 *count = i + 1;
1141 return 0;
1143 bad:
1144 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1145 return -EINVAL;
1147 EXPORT_SYMBOL(ceph_parse_ips);
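/*
 * Usage sketch (illustrative; the buffer and array size are
 * hypothetical): parse a comma-separated monitor list such as
 * "1.2.3.4,[::1]:6789".  Entries without an explicit port get
 * CEPH_MON_PORT.
 */
static int example_parse_mons(const char *buf, size_t len)
{
	struct ceph_entity_addr addrs[3];
	int num = 0;
	int ret = ceph_parse_ips(buf, buf + len, addrs,
				 ARRAY_SIZE(addrs), &num);

	if (ret)
		return ret;
	if (num > 0)
		pr_info("first mon addr is %s\n",
			ceph_pr_addr(&addrs[0].in_addr));
	return 0;
}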
1149 static int process_banner(struct ceph_connection *con)
1151 dout("process_banner on %p\n", con);
1153 if (verify_hello(con) < 0)
1154 return -1;
1156 ceph_decode_addr(&con->actual_peer_addr);
1157 ceph_decode_addr(&con->peer_addr_for_me);
1160 * Make sure the other end is who we wanted. note that the other
1161 * end may not yet know their ip address, so if it's 0.0.0.0, give
1162 * them the benefit of the doubt.
1164 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1165 sizeof(con->peer_addr)) != 0 &&
1166 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1167 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1168 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1169 ceph_pr_addr(&con->peer_addr.in_addr),
1170 (int)le32_to_cpu(con->peer_addr.nonce),
1171 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1172 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1173 con->error_msg = "wrong peer at address";
1174 return -1;
1178 * did we learn our address?
1180 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1181 int port = addr_port(&con->msgr->inst.addr.in_addr);
1183 memcpy(&con->msgr->inst.addr.in_addr,
1184 &con->peer_addr_for_me.in_addr,
1185 sizeof(con->peer_addr_for_me.in_addr));
1186 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1187 encode_my_addr(con->msgr);
1188 dout("process_banner learned my addr is %s\n",
1189 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1192 set_bit(NEGOTIATING, &con->state);
1193 prepare_read_connect(con);
1194 return 0;
1197 static void fail_protocol(struct ceph_connection *con)
1199 reset_connection(con);
1200 set_bit(CLOSED, &con->state); /* in case there's queued work */
1202 mutex_unlock(&con->mutex);
1203 if (con->ops->bad_proto)
1204 con->ops->bad_proto(con);
1205 mutex_lock(&con->mutex);
1208 static int process_connect(struct ceph_connection *con)
1210 u64 sup_feat = con->msgr->supported_features;
1211 u64 req_feat = con->msgr->required_features;
1212 u64 server_feat = le64_to_cpu(con->in_reply.features);
1214 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1216 switch (con->in_reply.tag) {
1217 case CEPH_MSGR_TAG_FEATURES:
1218 pr_err("%s%lld %s feature set mismatch,"
1219 " my %llx < server's %llx, missing %llx\n",
1220 ENTITY_NAME(con->peer_name),
1221 ceph_pr_addr(&con->peer_addr.in_addr),
1222 sup_feat, server_feat, server_feat & ~sup_feat);
1223 con->error_msg = "missing required protocol features";
1224 fail_protocol(con);
1225 return -1;
1227 case CEPH_MSGR_TAG_BADPROTOVER:
1228 pr_err("%s%lld %s protocol version mismatch,"
1229 " my %d != server's %d\n",
1230 ENTITY_NAME(con->peer_name),
1231 ceph_pr_addr(&con->peer_addr.in_addr),
1232 le32_to_cpu(con->out_connect.protocol_version),
1233 le32_to_cpu(con->in_reply.protocol_version));
1234 con->error_msg = "protocol version mismatch";
1235 fail_protocol(con);
1236 return -1;
1238 case CEPH_MSGR_TAG_BADAUTHORIZER:
1239 con->auth_retry++;
1240 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
1241 con->auth_retry);
1242 if (con->auth_retry == 2) {
1243 con->error_msg = "connect authorization failure";
1244 reset_connection(con);
1245 set_bit(CLOSED, &con->state);
1246 return -1;
1248 con->auth_retry = 1;
1249 prepare_write_connect(con->msgr, con, 0);
1250 prepare_read_connect(con);
1251 break;
1253 case CEPH_MSGR_TAG_RESETSESSION:
1255 * If we connected with a large connect_seq but the peer
1256 * has no record of a session with us (no connection, or
1257 * connect_seq == 0), they will send RESETSESSION to indicate
1258 * that they must have reset their session, and may have
1259 * dropped messages.
1261 dout("process_connect got RESET peer seq %u\n",
1262 le32_to_cpu(con->in_connect.connect_seq));
1263 pr_err("%s%lld %s connection reset\n",
1264 ENTITY_NAME(con->peer_name),
1265 ceph_pr_addr(&con->peer_addr.in_addr));
1266 reset_connection(con);
1267 prepare_write_connect(con->msgr, con, 0);
1268 prepare_read_connect(con);
1270 /* Tell ceph about it. */
1271 mutex_unlock(&con->mutex);
1272 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
1273 if (con->ops->peer_reset)
1274 con->ops->peer_reset(con);
1275 mutex_lock(&con->mutex);
1276 break;
1278 case CEPH_MSGR_TAG_RETRY_SESSION:
1280 * If we sent a smaller connect_seq than the peer has, try
1281 * again with a larger value.
1283 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
1284 le32_to_cpu(con->out_connect.connect_seq),
1285 le32_to_cpu(con->in_connect.connect_seq));
1286 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
1287 prepare_write_connect(con->msgr, con, 0);
1288 prepare_read_connect(con);
1289 break;
1291 case CEPH_MSGR_TAG_RETRY_GLOBAL:
1293 * If we sent a smaller global_seq than the peer has, try
1294 * again with a larger value.
1296 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1297 con->peer_global_seq,
1298 le32_to_cpu(con->in_connect.global_seq));
1299 get_global_seq(con->msgr,
1300 le32_to_cpu(con->in_connect.global_seq));
1301 prepare_write_connect(con->msgr, con, 0);
1302 prepare_read_connect(con);
1303 break;
1305 case CEPH_MSGR_TAG_READY:
1306 if (req_feat & ~server_feat) {
1307 pr_err("%s%lld %s protocol feature mismatch,"
1308 " my required %llx > server's %llx, need %llx\n",
1309 ENTITY_NAME(con->peer_name),
1310 ceph_pr_addr(&con->peer_addr.in_addr),
1311 req_feat, server_feat, req_feat & ~server_feat);
1312 con->error_msg = "missing required protocol features";
1313 fail_protocol(con);
1314 return -1;
1316 clear_bit(CONNECTING, &con->state);
1317 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
1318 con->connect_seq++;
1319 con->peer_features = server_feat;
1320 dout("process_connect got READY gseq %d cseq %d (%d)\n",
1321 con->peer_global_seq,
1322 le32_to_cpu(con->in_reply.connect_seq),
1323 con->connect_seq);
1324 WARN_ON(con->connect_seq !=
1325 le32_to_cpu(con->in_reply.connect_seq));
1327 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
1328 set_bit(LOSSYTX, &con->state);
1330 prepare_read_tag(con);
1331 break;
1333 case CEPH_MSGR_TAG_WAIT:
1335 * If there is a connection race (we are opening
1336 * connections to each other), one of us may just have
1337 * to WAIT. This shouldn't happen if we are the
1338 * client.
1340 pr_err("process_connect peer connecting WAIT\n");
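/* fall through */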
1342 default:
1343 pr_err("connect protocol error, will retry\n");
1344 con->error_msg = "protocol error, garbage tag during connect";
1345 return -1;
1347 return 0;
1352 * read (part of) an ack
1354 static int read_partial_ack(struct ceph_connection *con)
1356 int to = 0;
1358 return read_partial(con, &to, sizeof(con->in_temp_ack),
1359 &con->in_temp_ack);
1364 * We can finally discard anything that's been acked.
1366 static void process_ack(struct ceph_connection *con)
1368 struct ceph_msg *m;
1369 u64 ack = le64_to_cpu(con->in_temp_ack);
1370 u64 seq;
1372 while (!list_empty(&con->out_sent)) {
1373 m = list_first_entry(&con->out_sent, struct ceph_msg,
1374 list_head);
1375 seq = le64_to_cpu(m->hdr.seq);
1376 if (seq > ack)
1377 break;
1378 dout("got ack for seq %llu type %d at %p\n", seq,
1379 le16_to_cpu(m->hdr.type), m);
1380 ceph_msg_remove(m);
1382 prepare_read_tag(con);
1388 static int read_partial_message_section(struct ceph_connection *con,
1389 struct kvec *section,
1390 unsigned int sec_len, u32 *crc)
1392 int ret, left;
1394 BUG_ON(!section);
1396 while (section->iov_len < sec_len) {
1397 BUG_ON(section->iov_base == NULL);
1398 left = sec_len - section->iov_len;
1399 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
1400 section->iov_len, left);
1401 if (ret <= 0)
1402 return ret;
1403 section->iov_len += ret;
1404 if (section->iov_len == sec_len)
1405 *crc = crc32c(0, section->iov_base,
1406 section->iov_len);
1409 return 1;
1412 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1413 struct ceph_msg_header *hdr,
1414 int *skip);
1417 static int read_partial_message_pages(struct ceph_connection *con,
1418 struct page **pages,
1419 unsigned data_len, int datacrc)
1421 void *p;
1422 int ret;
1423 int left;
1425 left = min((int)(data_len - con->in_msg_pos.data_pos),
1426 (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
1427 /* (page) data */
1428 BUG_ON(pages == NULL);
1429 p = kmap(pages[con->in_msg_pos.page]);
1430 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1431 left);
1432 if (ret > 0 && datacrc)
1433 con->in_data_crc =
1434 crc32c(con->in_data_crc,
1435 p + con->in_msg_pos.page_pos, ret);
1436 kunmap(pages[con->in_msg_pos.page]);
1437 if (ret <= 0)
1438 return ret;
1439 con->in_msg_pos.data_pos += ret;
1440 con->in_msg_pos.page_pos += ret;
1441 if (con->in_msg_pos.page_pos == PAGE_SIZE) {
1442 con->in_msg_pos.page_pos = 0;
1443 con->in_msg_pos.page++;
1446 return ret;
1449 #ifdef CONFIG_BLOCK
1450 static int read_partial_message_bio(struct ceph_connection *con,
1451 struct bio **bio_iter, int *bio_seg,
1452 unsigned data_len, int datacrc)
1454 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1455 void *p;
1456 int ret, left;
1461 left = min((int)(data_len - con->in_msg_pos.data_pos),
1462 (int)(bv->bv_len - con->in_msg_pos.page_pos));
1464 p = kmap(bv->bv_page) + bv->bv_offset;
1466 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1467 left);
1468 if (ret > 0 && datacrc)
1469 con->in_data_crc =
1470 crc32c(con->in_data_crc,
1471 p + con->in_msg_pos.page_pos, ret);
1472 kunmap(bv->bv_page);
1473 if (ret <= 0)
1474 return ret;
1475 con->in_msg_pos.data_pos += ret;
1476 con->in_msg_pos.page_pos += ret;
1477 if (con->in_msg_pos.page_pos == bv->bv_len) {
1478 con->in_msg_pos.page_pos = 0;
1479 iter_bio_next(bio_iter, bio_seg);
1482 return ret;
1484 #endif
1487 * read (part of) a message.
1489 static int read_partial_message(struct ceph_connection *con)
1491 struct ceph_msg *m = con->in_msg;
1492 int ret;
1493 int to, left;
1494 unsigned front_len, middle_len, data_len, data_off;
1495 int datacrc = !con->msgr->nocrc;
1496 int skip;
1497 u64 seq;
1499 dout("read_partial_message con %p msg %p\n", con, m);
1501 /* header */
1502 while (con->in_base_pos < sizeof(con->in_hdr)) {
1503 left = sizeof(con->in_hdr) - con->in_base_pos;
1504 ret = ceph_tcp_recvmsg(con->sock,
1505 (char *)&con->in_hdr + con->in_base_pos,
1506 left);
1507 if (ret <= 0)
1508 return ret;
1509 con->in_base_pos += ret;
1510 if (con->in_base_pos == sizeof(con->in_hdr)) {
1511 u32 crc = crc32c(0, (void *)&con->in_hdr,
1512 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
1513 if (crc != le32_to_cpu(con->in_hdr.crc)) {
1514 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
1515 crc, le32_to_cpu(con->in_hdr.crc));
1517 return -EBADMSG;
1521 front_len = le32_to_cpu(con->in_hdr.front_len);
1522 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
1523 return -EIO;
1524 middle_len = le32_to_cpu(con->in_hdr.middle_len);
1525 if (middle_len > CEPH_MSG_MAX_DATA_LEN)
1526 return -EIO;
1527 data_len = le32_to_cpu(con->in_hdr.data_len);
1528 if (data_len > CEPH_MSG_MAX_DATA_LEN)
1529 return -EIO;
1530 data_off = le16_to_cpu(con->in_hdr.data_off);
1532 /* verify seq# */
1533 seq = le64_to_cpu(con->in_hdr.seq);
1534 if ((s64)seq - (s64)con->in_seq < 1) {
1535 pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
1536 ENTITY_NAME(con->peer_name),
1537 ceph_pr_addr(&con->peer_addr.in_addr),
1538 seq, con->in_seq + 1);
1539 con->in_base_pos = -front_len - middle_len - data_len -
1540 sizeof(m->footer);
1541 con->in_tag = CEPH_MSGR_TAG_READY;
1542 con->in_seq++;
1543 return 0;
1544 } else if ((s64)seq - (s64)con->in_seq > 1) {
1545 pr_err("read_partial_message bad seq %lld expected %lld\n",
1546 seq, con->in_seq + 1);
1547 con->error_msg = "bad message sequence # for incoming message";
1548 return -EBADMSG;
1551 /* allocate message? */
1552 if (!con->in_msg) {
1553 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1554 con->in_hdr.front_len, con->in_hdr.data_len);
1555 skip = 0;
1556 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
1557 if (skip) {
1558 /* skip this message */
1559 dout("alloc_msg said skip message\n");
1560 BUG_ON(con->in_msg);
1561 con->in_base_pos = -front_len - middle_len - data_len -
1562 sizeof(m->footer);
1563 con->in_tag = CEPH_MSGR_TAG_READY;
1564 con->in_seq++;
1565 return 0;
1567 if (!con->in_msg) {
1568 con->error_msg =
1569 "error allocating memory for incoming message";
1570 return -ENOMEM;
1572 m = con->in_msg;
1573 m->front.iov_len = 0; /* haven't read it yet */
1574 if (m->middle)
1575 m->middle->vec.iov_len = 0;
1577 con->in_msg_pos.page = 0;
1578 if (m->pages)
1579 con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
1580 else
1581 con->in_msg_pos.page_pos = 0;
1582 con->in_msg_pos.data_pos = 0;
1585 /* front */
1586 ret = read_partial_message_section(con, &m->front, front_len,
1587 &con->in_front_crc);
1588 if (ret <= 0)
1589 return ret;
1591 /* middle */
1592 if (m->middle) {
1593 ret = read_partial_message_section(con, &m->middle->vec,
1594 middle_len,
1595 &con->in_middle_crc);
1596 if (ret <= 0)
1597 return ret;
1599 #ifdef CONFIG_BLOCK
1600 if (m->bio && !m->bio_iter)
1601 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1602 #endif
1604 /* (page) data */
1605 while (con->in_msg_pos.data_pos < data_len) {
1606 if (m->pages) {
1607 ret = read_partial_message_pages(con, m->pages,
1608 data_len, datacrc);
1609 if (ret <= 0)
1610 return ret;
1611 #ifdef CONFIG_BLOCK
1612 } else if (m->bio) {
1614 ret = read_partial_message_bio(con,
1615 &m->bio_iter, &m->bio_seg,
1616 data_len, datacrc);
1617 if (ret <= 0)
1618 return ret;
1619 #endif
1620 } else {
1621 BUG_ON(1);
1625 /* footer */
1626 to = sizeof(m->hdr) + sizeof(m->footer);
1627 while (con->in_base_pos < to) {
1628 left = to - con->in_base_pos;
1629 ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
1630 (con->in_base_pos - sizeof(m->hdr)),
1631 left);
1632 if (ret <= 0)
1633 return ret;
1634 con->in_base_pos += ret;
1636 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1637 m, front_len, m->footer.front_crc, middle_len,
1638 m->footer.middle_crc, data_len, m->footer.data_crc);
1640 /* crc ok? */
1641 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
1642 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1643 m, con->in_front_crc, m->footer.front_crc);
1644 return -EBADMSG;
1646 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
1647 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1648 m, con->in_middle_crc, m->footer.middle_crc);
1649 return -EBADMSG;
1651 if (datacrc &&
1652 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
1653 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
1654 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
1655 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
1656 return -EBADMSG;
1659 return 1; /* done! */
1663 * Process message. This happens in the worker thread. The callback should
1664 * be careful not to do anything that waits on other incoming messages or it
1665 * may deadlock.
1667 static void process_message(struct ceph_connection *con)
1669 struct ceph_msg *msg;
1671 msg = con->in_msg;
1672 con->in_msg = NULL;
1674 /* if first message, set peer_name */
1675 if (con->peer_name.type == 0)
1676 con->peer_name = msg->hdr.src;
1678 con->in_seq++;
1679 mutex_unlock(&con->mutex);
1681 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1682 msg, le64_to_cpu(msg->hdr.seq),
1683 ENTITY_NAME(msg->hdr.src),
1684 le16_to_cpu(msg->hdr.type),
1685 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1686 le32_to_cpu(msg->hdr.front_len),
1687 le32_to_cpu(msg->hdr.data_len),
1688 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1689 con->ops->dispatch(con, msg);
1691 mutex_lock(&con->mutex);
1692 prepare_read_tag(con);
1697 * Write something to the socket. Called in a worker thread when the
1698 * socket appears to be writeable and we have something ready to send.
1700 static int try_write(struct ceph_connection *con)
1702 struct ceph_messenger *msgr = con->msgr;
1703 int ret = 1;
1705 dout("try_write start %p state %lu nref %d\n", con, con->state,
1706 atomic_read(&con->nref));
1708 more:
1709 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1711 /* open the socket first? */
1712 if (con->sock == NULL) {
1714 * if we were STANDBY and are reconnecting _this_
1715 * connection, bump connect_seq now. Always bump
1716 * global_seq.
1718 if (test_and_clear_bit(STANDBY, &con->state))
1719 con->connect_seq++;
1721 prepare_write_banner(msgr, con);
1722 prepare_write_connect(msgr, con, 1);
1723 prepare_read_banner(con);
1724 set_bit(CONNECTING, &con->state);
1725 clear_bit(NEGOTIATING, &con->state);
1727 BUG_ON(con->in_msg);
1728 con->in_tag = CEPH_MSGR_TAG_READY;
1729 dout("try_write initiating connect on %p new state %lu\n",
1730 con, con->state);
1731 con->sock = ceph_tcp_connect(con);
1732 if (IS_ERR(con->sock)) {
1733 con->sock = NULL;
1734 con->error_msg = "connect error";
1735 ret = -1;
1736 goto out;
1740 more_kvec:
1741 /* kvec data queued? */
1742 if (con->out_skip) {
1743 ret = write_partial_skip(con);
1744 if (ret <= 0)
1745 goto done;
1751 if (con->out_kvec_left) {
1752 ret = write_partial_kvec(con);
1753 if (ret <= 0)
1754 goto done;
1757 /* msg pages? */
1758 if (con->out_msg) {
1759 if (con->out_msg_done) {
1760 ceph_msg_put(con->out_msg);
1761 con->out_msg = NULL; /* we're done with this one */
1762 goto do_next;
1765 ret = write_partial_msg_pages(con);
1766 if (ret == 1)
1767 goto more_kvec; /* we need to send the footer, too! */
1768 if (ret == 0)
1769 goto done;
1770 if (ret < 0) {
1771 dout("try_write write_partial_msg_pages err %d\n",
1772 ret);
1773 goto done;
1777 do_next:
1778 if (!test_bit(CONNECTING, &con->state)) {
1779 /* is anything else pending? */
1780 if (!list_empty(&con->out_queue)) {
1781 prepare_write_message(con);
1782 goto more;
1784 if (con->in_seq > con->in_seq_acked) {
1785 prepare_write_ack(con);
1786 goto more;
1788 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
1789 prepare_write_keepalive(con);
1790 goto more;
1794 /* Nothing to do! */
1795 clear_bit(WRITE_PENDING, &con->state);
1796 dout("try_write nothing else to write.\n");
1797 done:
1798 ret = 0;
1799 out:
1800 dout("try_write done on %p\n", con);
1801 return ret;
1807 * Read what we can from the socket.
1809 static int try_read(struct ceph_connection *con)
1811 int ret = -1;
1813 if (!con->sock)
1814 return 0;
1816 if (test_bit(STANDBY, &con->state))
1817 return 0;
1819 dout("try_read start on %p\n", con);
1821 more:
1822 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1823 con->in_base_pos);
1824 if (test_bit(CONNECTING, &con->state)) {
1825 if (!test_bit(NEGOTIATING, &con->state)) {
1826 dout("try_read connecting\n");
1827 ret = read_partial_banner(con);
1828 if (ret <= 0)
1829 goto done;
1830 if (process_banner(con) < 0) {
1831 ret = -1;
1832 goto out;
1835 ret = read_partial_connect(con);
1836 if (ret <= 0)
1837 goto done;
1838 if (process_connect(con) < 0) {
1839 ret = -1;
1840 goto out;
1842 goto more;
1845 if (con->in_base_pos < 0) {
1847 * skipping + discarding content.
1849 * FIXME: there must be a better way to do this!
1851 static char buf[1024];
1852 int skip = min(1024, -con->in_base_pos);
1853 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1854 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1855 if (ret <= 0)
1856 goto done;
1857 con->in_base_pos += ret;
1858 if (con->in_base_pos)
1859 goto more;
1861 if (con->in_tag == CEPH_MSGR_TAG_READY) {
1863 * what's next?
1865 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
1866 if (ret <= 0)
1867 goto done;
1868 dout("try_read got tag %d\n", (int)con->in_tag);
1869 switch (con->in_tag) {
1870 case CEPH_MSGR_TAG_MSG:
1871 prepare_read_message(con);
1872 break;
1873 case CEPH_MSGR_TAG_ACK:
1874 prepare_read_ack(con);
1875 break;
1876 case CEPH_MSGR_TAG_CLOSE:
1877 set_bit(CLOSED, &con->state); /* fixme */
1878 goto done;
1879 default:
1880 goto bad_tag;
1883 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
1884 ret = read_partial_message(con);
1885 if (ret <= 0) {
1886 switch (ret) {
1887 case -EBADMSG:
1888 con->error_msg = "bad crc";
1889 ret = -EIO;
1890 goto out;
1891 case -EIO:
1892 con->error_msg = "io error";
1893 goto out;
1894 default:
1895 goto done;
1898 if (con->in_tag == CEPH_MSGR_TAG_READY)
1899 goto more;
1900 process_message(con);
1901 goto more;
1903 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
1904 ret = read_partial_ack(con);
1905 if (ret <= 0)
1906 goto done;
1907 process_ack(con);
1908 goto more;
1911 done:
1912 ret = 0;
1913 out:
1914 dout("try_read done on %p\n", con);
1915 return ret;
1917 bad_tag:
1918 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
1919 con->error_msg = "protocol error, garbage tag";
1920 ret = -1;
1921 goto out;
1926 * Atomically queue work on a connection. Bump @con reference to
1927 * avoid races with connection teardown.
1929 * There is some trickery going on with QUEUED and BUSY because we
1930 * only want a _single_ thread operating on each connection at any
1931 * point in time, but we want to use all available CPUs.
1933 * The worker thread only proceeds if it can atomically set BUSY. It
1934 * clears QUEUED and does its thing. When it thinks it's done, it
1935 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
1936 * (tries again to set BUSY).
1938 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
1939 * try to queue work. If that fails (work is already queued, or BUSY)
1940 * we give up (work also already being done or is queued) but leave QUEUED
1941 * set so that the worker thread will loop if necessary.
1943 static void queue_con(struct ceph_connection *con)
1945 if (test_bit(DEAD, &con->state)) {
1946 dout("queue_con %p ignoring: DEAD\n",
1947 con);
1948 return;
1951 if (!con->ops->get(con)) {
1952 dout("queue_con %p ref count 0\n", con);
1953 return;
1956 set_bit(QUEUED, &con->state);
1957 if (test_bit(BUSY, &con->state)) {
1958 dout("queue_con %p - already BUSY\n", con);
1959 con->ops->put(con);
1960 } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
1961 dout("queue_con %p - already queued\n", con);
1962 con->ops->put(con);
1963 } else {
1964 dout("queue_con %p\n", con);
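/*
 * The QUEUED/BUSY pattern above, in miniature (illustrative only, not
 * part of the original file): a handler that must never run
 * concurrently for the same object.
 */
static void example_worker(unsigned long *state)
{
again:
	if (test_and_set_bit(BUSY, state))
		return;			/* another thread is running */
	clear_bit(QUEUED, state);
	/* ... do the actual work ... */
	clear_bit(BUSY, state);
	if (test_bit(QUEUED, state))	/* requeued while we ran? */
		goto again;
}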
1969 * Do some work on a connection. Drop a connection ref when we're done.
1971 static void con_work(struct work_struct *work)
1973 struct ceph_connection *con = container_of(work, struct ceph_connection,
1974 work.work);
1975 int backoff = 0;
1977 more:
1978 if (test_and_set_bit(BUSY, &con->state) != 0) {
1979 dout("con_work %p BUSY already set\n", con);
1980 goto out;
1982 dout("con_work %p start, clearing QUEUED\n", con);
1983 clear_bit(QUEUED, &con->state);
1985 mutex_lock(&con->mutex);
1987 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
1988 dout("con_work CLOSED\n");
1989 con_close_socket(con);
1990 goto done;
1992 if (test_and_clear_bit(OPENING, &con->state)) {
1993 /* reopen w/ new peer */
1994 dout("con_work OPENING\n");
1995 con_close_socket(con);
1998 if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
1999 try_read(con) < 0 ||
2000 try_write(con) < 0) {
2001 mutex_unlock(&con->mutex);
2002 backoff = 1;
2003 ceph_fault(con); /* error/fault path */
2004 goto done_unlocked;
2007 done:
2008 mutex_unlock(&con->mutex);
2010 done_unlocked:
2011 clear_bit(BUSY, &con->state);
2012 dout("con->state=%lu\n", con->state);
2013 if (test_bit(QUEUED, &con->state)) {
2014 if (!backoff || test_bit(OPENING, &con->state)) {
2015 dout("con_work %p QUEUED reset, looping\n", con);
2016 goto more;
2018 dout("con_work %p QUEUED reset, but just faulted\n", con);
2019 clear_bit(QUEUED, &con->state);
2021 dout("con_work %p done\n", con);
2023 out:
2024 con->ops->put(con);
2029 * Generic error/fault handler. A retry mechanism is used with
2030 * exponential backoff
2032 static void ceph_fault(struct ceph_connection *con)
2034 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2035 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2036 dout("fault %p state %lu to peer %s\n",
2037 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2039 if (test_bit(LOSSYTX, &con->state)) {
2040 dout("fault on LOSSYTX channel\n");
2041 goto out;
2044 mutex_lock(&con->mutex);
2045 if (test_bit(CLOSED, &con->state))
2046 goto out_unlock;
2048 con_close_socket(con);
2050 if (con->in_msg) {
2051 ceph_msg_put(con->in_msg);
2052 con->in_msg = NULL;
2055 /* Requeue anything that hasn't been acked */
2056 list_splice_init(&con->out_sent, &con->out_queue);
2058 /* If there are no messages in the queue, place the connection
2059 * in a STANDBY state (i.e., don't try to reconnect just yet). */
2060 if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
2061 dout("fault setting STANDBY\n");
2062 set_bit(STANDBY, &con->state);
2063 } else {
2064 /* retry after a delay. */
2065 if (con->delay == 0)
2066 con->delay = BASE_DELAY_INTERVAL;
2067 else if (con->delay < MAX_DELAY_INTERVAL)
2068 con->delay *= 2;
2069 dout("fault queueing %p delay %lu\n", con, con->delay);
2070 con->ops->get(con);
2071 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2072 round_jiffies_relative(con->delay)) == 0)
2073 con->ops->put(con);
2076 out_unlock:
2077 mutex_unlock(&con->mutex);
2078 out:
2080 * in case we faulted due to authentication, invalidate our
2081 * current tickets so that we can get new ones.
2083 if (con->auth_retry && con->ops->invalidate_authorizer) {
2084 dout("calling invalidate_authorizer()\n");
2085 con->ops->invalidate_authorizer(con);
2088 if (con->ops->fault)
2089 con->ops->fault(con);
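/*
 * Illustrative only: the delay schedule produced by the retry logic
 * above.  With BASE_DELAY_INTERVAL and MAX_DELAY_INTERVAL from
 * messenger.h, successive faults yield base, 2*base, 4*base, ...,
 * with the doubling stopping once the delay reaches
 * MAX_DELAY_INTERVAL.
 */
static unsigned long example_next_delay(unsigned long delay)
{
	if (delay == 0)
		return BASE_DELAY_INTERVAL;
	if (delay < MAX_DELAY_INTERVAL)
		return delay * 2;
	return delay;
}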
2095 * create a new messenger instance
2097 struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
2098 u32 supported_features,
2099 u32 required_features)
2101 struct ceph_messenger *msgr;
2103 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
2104 if (msgr == NULL)
2105 return ERR_PTR(-ENOMEM);
2107 msgr->supported_features = supported_features;
2108 msgr->required_features = required_features;
2110 spin_lock_init(&msgr->global_seq_lock);
2112 /* the zero page is needed if a request is "canceled" while the message
2113 * is being written over the socket */
2114 msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
2115 if (!msgr->zero_page) {
2116 kfree(msgr);
2117 return ERR_PTR(-ENOMEM);
2119 kmap(msgr->zero_page);
2121 if (myaddr)
2122 msgr->inst.addr = *myaddr;
2124 /* select a random nonce */
2125 msgr->inst.addr.type = 0;
2126 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2127 encode_my_addr(msgr);
2129 dout("messenger_create %p\n", msgr);
2130 return msgr;
2132 EXPORT_SYMBOL(ceph_messenger_create);

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
        dout("destroy %p\n", msgr);
        kunmap(msgr->zero_page);
        __free_page(msgr->zero_page);
        kfree(msgr);
        dout("destroyed messenger %p\n", msgr);
}
EXPORT_SYMBOL(ceph_messenger_destroy);
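
/*
 * Usage sketch (illustrative, not part of the original file): a client
 * typically creates one messenger at startup and destroys it on
 * teardown; "supported" and "required" are assumed to be the caller's
 * feature bitmasks:
 *
 *      struct ceph_messenger *msgr;
 *
 *      msgr = ceph_messenger_create(NULL, supported, required);
 *      if (IS_ERR(msgr))
 *              return PTR_ERR(msgr);
 *      ...
 *      ceph_messenger_destroy(msgr);
 *
 * Passing a NULL myaddr leaves the local address to be filled in later;
 * the nonce is randomized either way.
 */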

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
        if (test_bit(CLOSED, &con->state)) {
                dout("con_send %p closed, dropping %p\n", con, msg);
                ceph_msg_put(msg);
                return;
        }

        /* set src+dst */
        msg->hdr.src = con->msgr->inst.name;

        BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

        msg->needs_out_seq = true;

        /* queue */
        mutex_lock(&con->mutex);
        BUG_ON(!list_empty(&msg->list_head));
        list_add_tail(&msg->list_head, &con->out_queue);
        dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
             ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.middle_len),
             le32_to_cpu(msg->hdr.data_len));
        mutex_unlock(&con->mutex);

        /* if there wasn't anything waiting to send before, queue
         * new work */
        if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
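
/*
 * Usage sketch (illustrative, not part of the original file): the
 * caller's reference is handed over to the messenger, so a typical send
 * is allocate, fill the front, and queue; "type", "payload" and "len"
 * are hypothetical caller state:
 *
 *      struct ceph_msg *m = ceph_msg_new(type, len, GFP_NOFS);
 *      if (m) {
 *              memcpy(m->front.iov_base, payload, len);
 *              ceph_con_send(con, m);
 *      }
 *
 * On a CLOSED connection the message is simply dropped and put, as in
 * the first branch above.
 */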

/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
        mutex_lock(&con->mutex);
        if (!list_empty(&msg->list_head)) {
                dout("con_revoke %p msg %p - was on queue\n", con, msg);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
                msg->hdr.seq = 0;
        }
        if (con->out_msg == msg) {
                dout("con_revoke %p msg %p - was sending\n", con, msg);
                con->out_msg = NULL;
                if (con->out_kvec_is_msg) {
                        con->out_skip = con->out_kvec_bytes;
                        con->out_kvec_is_msg = false;
                }
                ceph_msg_put(msg);
                msg->hdr.seq = 0;
        }
        mutex_unlock(&con->mutex);
}
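
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * resending a request on another connection revokes the old copy first
 * so stale bytes stop flowing; since hdr.seq is cleared above, the same
 * message can be re-queued.  "req", "old_con" and "new_con" are
 * hypothetical caller state:
 *
 *      ceph_con_revoke(old_con, req->r_request);
 *      ceph_msg_get(req->r_request);
 *      ceph_con_send(new_con, req->r_request);
 *
 * ceph_msg_get() (declared in messenger.h) takes the reference that
 * ceph_con_send() will consume.
 */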

/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
        mutex_lock(&con->mutex);
        if (con->in_msg && con->in_msg == msg) {
                unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
                unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
                unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

                /* skip rest of message */
                dout("con_revoke_pages %p msg %p revoked\n", con, msg);
                con->in_base_pos = con->in_base_pos -
                                sizeof(struct ceph_msg_header) -
                                front_len -
                                middle_len -
                                data_len -
                                sizeof(struct ceph_msg_footer);
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
                con->in_tag = CEPH_MSGR_TAG_READY;
                con->in_seq++;
        } else {
                dout("con_revoke_pages %p msg %p pages %p no-op\n",
                     con, con->in_msg, msg);
        }
        mutex_unlock(&con->mutex);
}
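
/*
 * Worked example (illustrative): with front_len=128, middle_len=0 and
 * data_len=4096, the adjustment above subtracts
 * sizeof(header) + 128 + 0 + 4096 + sizeof(footer) from in_base_pos,
 * leaving it negative; the read loop treats a negative in_base_pos as a
 * count of bytes to read and discard from the socket, resynchronizing
 * the stream at the next tag.
 */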

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
        if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
            test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
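
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * tickle long-lived connections from periodic work, e.g. a monitor
 * client's delayed-work tick:
 *
 *      ceph_con_keepalive(con);
 *
 * The paired test_and_set_bit calls above make repeated calls
 * idempotent until the keepalive byte is actually written.
 */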

/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
{
        struct ceph_msg *m;

        m = kmalloc(sizeof(*m), flags);
        if (m == NULL)
                goto out;
        kref_init(&m->kref);
        INIT_LIST_HEAD(&m->list_head);

        m->hdr.tid = 0;
        m->hdr.type = cpu_to_le16(type);
        m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
        m->hdr.version = 0;
        m->hdr.front_len = cpu_to_le32(front_len);
        m->hdr.middle_len = 0;
        m->hdr.data_len = 0;
        m->hdr.data_off = 0;
        m->hdr.reserved = 0;
        m->footer.front_crc = 0;
        m->footer.middle_crc = 0;
        m->footer.data_crc = 0;
        m->footer.flags = 0;
        m->front_max = front_len;
        m->front_is_vmalloc = false;
        m->more_to_follow = false;
        m->pool = NULL;

        /* front */
        if (front_len) {
                if (front_len > PAGE_CACHE_SIZE) {
                        m->front.iov_base = __vmalloc(front_len, flags,
                                                      PAGE_KERNEL);
                        m->front_is_vmalloc = true;
                } else {
                        m->front.iov_base = kmalloc(front_len, flags);
                }
                if (m->front.iov_base == NULL) {
                        pr_err("msg_new can't allocate %d bytes\n",
                               front_len);
                        goto out2;
                }
        } else {
                m->front.iov_base = NULL;
        }
        m->front.iov_len = front_len;

        /* middle */
        m->middle = NULL;

        /* data */
        m->nr_pages = 0;
        m->pages = NULL;
        m->pagelist = NULL;
        m->bio = NULL;
        m->bio_iter = NULL;
        m->bio_seg = 0;
        m->trail = NULL;

        dout("ceph_msg_new %p front %d\n", m, front_len);
        return m;

out2:
        ceph_msg_put(m);
out:
        pr_err("msg_new can't create type %d front %d\n", type, front_len);
        return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
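
/*
 * Usage sketch (illustrative, not part of the original file): the new
 * message owns one reference and a front buffer of front_len bytes; a
 * caller fills the front and either sends it (handing over the ref) or
 * drops it with ceph_msg_put():
 *
 *      struct ceph_msg *m = ceph_msg_new(CEPH_MSG_STATFS, 64, GFP_NOFS);
 *      if (m) {
 *              memset(m->front.iov_base, 0, 64);
 *              ceph_msg_put(m);
 *      }
 *
 * CEPH_MSG_STATFS is just one valid type; any type known to
 * ceph_msg_type_name() behaves the same here.
 */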

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
        int type = le16_to_cpu(msg->hdr.type);
        int middle_len = le32_to_cpu(msg->hdr.middle_len);

        dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
             ceph_msg_type_name(type), middle_len);
        BUG_ON(!middle_len);
        BUG_ON(msg->middle);

        msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
        if (!msg->middle)
                return -ENOMEM;
        return 0;
}
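
/*
 * For example (illustrative numbers): a corrupt or hostile header
 * advertising a multi-megabyte middle_len may well make
 * ceph_buffer_new() fail; returning -ENOMEM here lets ceph_alloc_msg()
 * below drop the partially built message rather than crash mid-read.
 */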

/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
                                       struct ceph_msg_header *hdr,
                                       int *skip)
{
        int type = le16_to_cpu(hdr->type);
        int front_len = le32_to_cpu(hdr->front_len);
        int middle_len = le32_to_cpu(hdr->middle_len);
        struct ceph_msg *msg = NULL;
        int ret;

        if (con->ops->alloc_msg) {
                mutex_unlock(&con->mutex);
                msg = con->ops->alloc_msg(con, hdr, skip);
                mutex_lock(&con->mutex);
                if (!msg || *skip)
                        return NULL;
        }
        if (!msg) {
                *skip = 0;
                msg = ceph_msg_new(type, front_len, GFP_NOFS);
                if (!msg) {
                        pr_err("unable to allocate msg type %d len %d\n",
                               type, front_len);
                        return NULL;
                }
        }
        memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

        if (middle_len && !msg->middle) {
                ret = ceph_alloc_middle(con, msg);
                if (ret < 0) {
                        ceph_msg_put(msg);
                        return NULL;
                }
        }

        return msg;
}
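
/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * file): a connection owner can pre-allocate incoming messages by
 * supplying alloc_msg in its ceph_connection_operations; it is invoked
 * above with con->mutex dropped:
 *
 *      static struct ceph_msg *my_alloc_msg(struct ceph_connection *con,
 *                                           struct ceph_msg_header *hdr,
 *                                           int *skip)
 *      {
 *              *skip = 0;
 *              return ceph_msg_new(le16_to_cpu(hdr->type),
 *                                  le32_to_cpu(hdr->front_len), GFP_NOFS);
 *      }
 *
 * Returning NULL with *skip set tells the messenger to read and discard
 * the message body instead.
 */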

/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
        dout("msg_kfree %p\n", m);
        if (m->front_is_vmalloc)
                vfree(m->front.iov_base);
        else
                kfree(m->front.iov_base);
        kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
        struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

        dout("ceph_msg_put last one on %p\n", m);
        WARN_ON(!list_empty(&m->list_head));

        /* drop middle, data, if any */
        if (m->middle) {
                ceph_buffer_put(m->middle);
                m->middle = NULL;
        }
        m->nr_pages = 0;
        m->pages = NULL;

        if (m->pagelist) {
                ceph_pagelist_release(m->pagelist);
                kfree(m->pagelist);
                m->pagelist = NULL;
        }

        m->trail = NULL;

        if (m->pool)
                ceph_msgpool_put(m->pool, m);
        else
                ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);
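
/*
 * Usage sketch (illustrative, not part of the original file):
 * ceph_msg_put(), the kref_put wrapper declared in messenger.h, funnels
 * the final reference here; pool-backed messages return to their
 * msgpool, everything else is freed:
 *
 *      struct ceph_msg *m = ceph_msg_new(type, len, GFP_NOFS);
 *      ceph_msg_get(m);     (share with another context)
 *      ceph_msg_put(m);     (their reference)
 *      ceph_msg_put(m);     (final reference ends up here)
 */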

void ceph_msg_dump(struct ceph_msg *msg)
{
        pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
                 msg->front_max, msg->nr_pages);
        print_hex_dump(KERN_DEBUG, "header: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->hdr, sizeof(msg->hdr), true);
        print_hex_dump(KERN_DEBUG, " front: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       msg->front.iov_base, msg->front.iov_len, true);
        if (msg->middle)
                print_hex_dump(KERN_DEBUG, "middle: ",
                               DUMP_PREFIX_OFFSET, 16, 1,
                               msg->middle->vec.iov_base,
                               msg->middle->vec.iov_len, true);
        print_hex_dump(KERN_DEBUG, "footer: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);
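
/*
 * Usage sketch (illustrative, not part of the original file): handy
 * when diagnosing a bad checksum or unexpected type on the read path;
 * "expected_crc" is hypothetical caller state:
 *
 *      if (le32_to_cpu(msg->footer.front_crc) != expected_crc) {
 *              pr_err("bad front crc\n");
 *              ceph_msg_dump(msg);
 *      }
 */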