#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
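/*
 * Rough usage sketch (illustrative only; the real callers are the
 * mon/osd/mds clients, and my_ops below is a placeholder for the
 * caller's ceph_connection_operations):
 *
 *	msgr = ceph_messenger_create(myaddr, supported, required);
 *	ceph_con_init(msgr, con);
 *	con->ops = &my_ops;		get/put, dispatch, fault, alloc_msg
 *	ceph_con_open(con, peer_addr);
 *	ceph_con_send(con, msg);	queued; con_work() does the I/O
 */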
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
#define MAX_ADDR_STR_LEN 60
static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
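/*
 * ceph_pr_addr() below formats into one of these MAX_ADDR_STR static
 * buffers, rotating through them under addr_str_lock, so a few recent
 * results stay valid at once without any allocation.
 */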
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
        int i;
        char *s;
        struct sockaddr_in *in4 = (void *)ss;
        struct sockaddr_in6 *in6 = (void *)ss;

        spin_lock(&addr_str_lock);
        i = last_addr_str++;
        if (last_addr_str == MAX_ADDR_STR)
                last_addr_str = 0;
        spin_unlock(&addr_str_lock);
        s = addr_str[i];

        switch (ss->ss_family) {
        case AF_INET:
                snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
                         (unsigned int)ntohs(in4->sin_port));
                break;

        case AF_INET6:
                snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
                         (unsigned int)ntohs(in6->sin6_port));
                break;

        default:
                sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
        }

        return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
static void encode_my_addr(struct ceph_messenger *msgr)
{
        memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
        ceph_encode_addr(&msgr->my_enc_addr);
}
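/*
 * my_enc_addr is the wire-format copy of our own address; it is what
 * prepare_write_banner() hands to the peer during the initial handshake.
 */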
/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int ceph_msgr_init(void)
{
        ceph_msgr_wq = create_workqueue("ceph-msgr");
        if (IS_ERR(ceph_msgr_wq)) {
                int ret = PTR_ERR(ceph_msgr_wq);
                pr_err("msgr_init failed to create workqueue: %d\n", ret);
                ceph_msgr_wq = NULL;
                return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
        destroy_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
        flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;
        if (sk->sk_state != TCP_CLOSE_WAIT) {
                dout("ceph_data_ready on %p state = %lu, queueing work\n",
                     con, con->state);
                queue_con(con);
        }
}
/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        /* only queue to workqueue if there is data we want to write. */
        if (test_bit(WRITE_PENDING, &con->state)) {
                dout("ceph_write_space %p queueing write work\n", con);
                queue_con(con);
        } else {
                dout("ceph_write_space %p nothing to write\n", con);
        }

        /* since we have our own write_space, clear the SOCK_NOSPACE flag */
        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        dout("ceph_state_change %p state = %lu sk_state = %u\n",
             con, con->state, sk->sk_state);

        if (test_bit(CLOSED, &con->state))
                return;

        switch (sk->sk_state) {
        case TCP_CLOSE:
                dout("ceph_state_change TCP_CLOSE\n");
        case TCP_CLOSE_WAIT:
                dout("ceph_state_change TCP_CLOSE_WAIT\n");
                if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
                        if (test_bit(CONNECTING, &con->state))
                                con->error_msg = "connection failed";
                        else
                                con->error_msg = "socket closed";
                        queue_con(con);
                }
                break;
        case TCP_ESTABLISHED:
                dout("ceph_state_change TCP_ESTABLISHED\n");
                queue_con(con);
                break;
        }
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
                               struct ceph_connection *con)
{
        struct sock *sk = sock->sk;
        sk->sk_user_data = (void *)con;
        sk->sk_data_ready = ceph_data_ready;
        sk->sk_write_space = ceph_write_space;
        sk->sk_state_change = ceph_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
        struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
        int ret;

        ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
                               IPPROTO_TCP, &sock);
        if (ret)
                return ERR_PTR(ret);
        sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
        lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

        set_sock_callbacks(sock, con);

        dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

        ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
                                 O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     ceph_pr_addr(&con->peer_addr.in_addr),
                     sock->sk->sk_state);
        } else if (ret < 0) {
                pr_err("connect %s error %d\n",
                       ceph_pr_addr(&con->peer_addr.in_addr), ret);
                sock_release(sock);
                con->error_msg = "connect error";
                return ERR_PTR(ret);
        }
        return sock;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = {buf, len};
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

        return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}
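/*
 * MSG_DONTWAIT makes this a non-blocking read: a short or negative
 * return roughly means "no more data right now", and the read path
 * relies on ceph_data_ready() to queue the worker again when more
 * bytes arrive.
 */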
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
                            size_t kvlen, size_t len, int more)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

        if (more)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

        return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
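/*
 * @more mirrors whether the caller has more data queued behind this
 * write; MSG_MORE lets TCP coalesce the pieces into fewer segments,
 * while MSG_EOR marks the end when nothing else is pending.
 */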
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
        int rc;

        dout("con_close_socket on %p sock %p\n", con, con->sock);
        if (!con->sock)
                return 0;
        set_bit(SOCK_CLOSED, &con->state);
        rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
        sock_release(con->sock);
        con->sock = NULL;
        clear_bit(SOCK_CLOSED, &con->state);
        return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
        list_del_init(&msg->list_head);
        ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
                                                        list_head);
                ceph_msg_remove(msg);
        }
}
static void reset_connection(struct ceph_connection *con)
{
        /* reset connection, out_queue, msg_ and connect_seq */
        /* discard existing out_queue and msg_seq */
        ceph_msg_remove_list(&con->out_queue);
        ceph_msg_remove_list(&con->out_sent);

        if (con->in_msg) {
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
        }

        con->connect_seq = 0;
        con->out_seq = 0;
        if (con->out_msg) {
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
        con->out_keepalive_pending = false;
        con->in_seq = 0;
        con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
        dout("con_close %p peer %s\n", con,
             ceph_pr_addr(&con->peer_addr.in_addr));
        set_bit(CLOSED, &con->state);  /* in case there's queued work */
        clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
        clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
        clear_bit(KEEPALIVE_PENDING, &con->state);
        clear_bit(WRITE_PENDING, &con->state);
        mutex_lock(&con->mutex);
        reset_connection(con);
        con->peer_global_seq = 0;
        cancel_delayed_work(&con->work);
        mutex_unlock(&con->mutex);
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
        dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        clear_bit(CLOSED, &con->state);
        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
        return con->connect_seq > 0;
}
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
        dout("con_get %p nref = %d -> %d\n", con,
             atomic_read(&con->nref), atomic_read(&con->nref) + 1);
        if (atomic_inc_not_zero(&con->nref))
                return con;
        return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
        dout("con_put %p nref = %d -> %d\n", con,
             atomic_read(&con->nref), atomic_read(&con->nref) - 1);
        BUG_ON(atomic_read(&con->nref) == 0);
        if (atomic_dec_and_test(&con->nref)) {
                BUG_ON(con->sock);
                kfree(con);
        }
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
        dout("con_init %p\n", con);
        memset(con, 0, sizeof(*con));
        atomic_set(&con->nref, 1);
        con->msgr = msgr;
        mutex_init(&con->mutex);
        INIT_LIST_HEAD(&con->out_queue);
        INIT_LIST_HEAD(&con->out_sent);
        INIT_DELAYED_WORK(&con->work, con_work);
}
EXPORT_SYMBOL(ceph_con_init);
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
        u32 ret;

        spin_lock(&msgr->global_seq_lock);
        if (msgr->global_seq < gt)
                msgr->global_seq = gt;
        ret = ++msgr->global_seq;
        spin_unlock(&msgr->global_seq_lock);
        return ret;
}
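/*
 * For example, if the peer answers RETRY_GLOBAL with global_seq 42,
 * process_connect() calls get_global_seq(msgr, 42) so that the next
 * connect attempt advertises at least 43.
 */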
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
        struct ceph_msg *m = con->out_msg;

        dout("prepare_write_message_footer %p\n", con);
        con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        con->out_kvec[v].iov_len = sizeof(m->footer);
        con->out_kvec_bytes += sizeof(m->footer);
        con->out_kvec_left++;
        con->out_more = m->more_to_follow;
        con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
        struct ceph_msg *m;
        int v = 0;

        con->out_kvec_bytes = 0;
        con->out_kvec_is_msg = true;
        con->out_msg_done = false;

        /* Sneak an ack in there first?  If we can get it into the same
         * TCP packet that's a good thing. */
        if (con->in_seq > con->in_seq_acked) {
                con->in_seq_acked = con->in_seq;
                con->out_kvec[v].iov_base = &tag_ack;
                con->out_kvec[v++].iov_len = 1;
                con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
                con->out_kvec[v].iov_base = &con->out_temp_ack;
                con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
                con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
        }

        m = list_first_entry(&con->out_queue,
                             struct ceph_msg, list_head);
        con->out_msg = m;
        if (test_bit(LOSSYTX, &con->state)) {
                list_del_init(&m->list_head);
        } else {
                /* put message on sent list */
                ceph_msg_get(m);
                list_move_tail(&m->list_head, &con->out_sent);
        }

        /*
         * only assign outgoing seq # if we haven't sent this message
         * yet.  if it is requeued, resend with its original seq.
         */
        if (m->needs_out_seq) {
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }

        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
             le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
             le32_to_cpu(m->hdr.data_len),
             m->nr_pages);
        BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

        /* tag + hdr + front + middle */
        con->out_kvec[v].iov_base = &tag_msg;
        con->out_kvec[v++].iov_len = 1;
        con->out_kvec[v].iov_base = &m->hdr;
        con->out_kvec[v++].iov_len = sizeof(m->hdr);
        con->out_kvec[v++] = m->front;
        if (m->middle)
                con->out_kvec[v++] = m->middle->vec;
        con->out_kvec_left = v;
        con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
                (m->middle ? m->middle->vec.iov_len : 0);
        con->out_kvec_cur = con->out_kvec;

        /* fill in crc (except data pages), footer */
        con->out_msg->hdr.crc =
                cpu_to_le32(crc32c(0, (void *)&m->hdr,
                                   sizeof(m->hdr) - sizeof(m->hdr.crc)));
        con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
        con->out_msg->footer.front_crc =
                cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
        if (m->middle)
                con->out_msg->footer.middle_crc =
                        cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
                                           m->middle->vec.iov_len));
        else
                con->out_msg->footer.middle_crc = 0;
        con->out_msg->footer.data_crc = 0;
        dout("prepare_write_message front_crc %u data_crc %u\n",
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));

        /* is there a data payload? */
        if (le32_to_cpu(m->hdr.data_len) > 0) {
                /* initialize page iterator */
                con->out_msg_pos.page = 0;
                if (m->pages)
                        con->out_msg_pos.page_pos = m->page_alignment;
                else
                        con->out_msg_pos.page_pos = 0;
                con->out_msg_pos.data_pos = 0;
                con->out_msg_pos.did_page_crc = 0;
                con->out_more = 1;  /* data + footer will follow */
        } else {
                /* no, queue up footer too and be done */
                prepare_write_message_footer(con, v);
        }

        set_bit(WRITE_PENDING, &con->state);
}
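/*
 * After prepare_write_message(), out_kvec describes the leading part of
 * the message as it goes over the wire:
 *
 *	[tag_msg][ceph_msg_header][front][middle (optional)]
 *
 * Small messages get the footer queued right away; messages with a data
 * payload are finished by write_partial_msg_pages(), which sends the
 * pages with kernel_sendpage() and then queues the footer.
 */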
static void prepare_write_ack(struct ceph_connection *con)
{
        dout("prepare_write_ack %p %llu -> %llu\n", con,
             con->in_seq_acked, con->in_seq);
        con->in_seq_acked = con->in_seq;

        con->out_kvec[0].iov_base = &tag_ack;
        con->out_kvec[0].iov_len = 1;
        con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
        con->out_kvec[1].iov_base = &con->out_temp_ack;
        con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
        con->out_kvec_left = 2;
        con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 1;  /* more will follow.. eventually.. */
        set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
        dout("prepare_write_keepalive %p\n", con);
        con->out_kvec[0].iov_base = &tag_keepalive;
        con->out_kvec[0].iov_len = 1;
        con->out_kvec_left = 1;
        con->out_kvec_bytes = 1;
        con->out_kvec_cur = con->out_kvec;
        set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */

static void prepare_connect_authorizer(struct ceph_connection *con)
{
        void *auth_buf;
        int auth_len = 0;
        int auth_protocol = 0;

        mutex_unlock(&con->mutex);
        if (con->ops->get_authorizer)
                con->ops->get_authorizer(con, &auth_buf, &auth_len,
                                         &auth_protocol, &con->auth_reply_buf,
                                         &con->auth_reply_buf_len,
                                         con->auth_retry);
        mutex_lock(&con->mutex);

        con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
        con->out_connect.authorizer_len = cpu_to_le32(auth_len);

        if (auth_len) {
                con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
                con->out_kvec[con->out_kvec_left].iov_len = auth_len;
                con->out_kvec_left++;
                con->out_kvec_bytes += auth_len;
        }
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
                                 struct ceph_connection *con)
{
        int len = strlen(CEPH_BANNER);

        con->out_kvec[0].iov_base = CEPH_BANNER;
        con->out_kvec[0].iov_len = len;
        con->out_kvec[1].iov_base = &msgr->my_enc_addr;
        con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
        con->out_kvec_left = 2;
        con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);
}
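/*
 * The banner exchange is symmetric: each side sends the CEPH_BANNER
 * string followed by its encoded address, and read_partial_banner()
 * reads back the peer's banner, the peer's address, and the address
 * the peer observed for us (peer_addr_for_me).
 */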
static void prepare_write_connect(struct ceph_messenger *msgr,
                                  struct ceph_connection *con,
                                  int after_banner)
{
        unsigned global_seq = get_global_seq(con->msgr, 0);
        int proto;

        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
                proto = CEPH_MONC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_OSD:
                proto = CEPH_OSDC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_MDS:
                proto = CEPH_MDSC_PROTOCOL;
                break;
        default:
                BUG();
        }

        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);

        con->out_connect.features = cpu_to_le64(msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;

        if (!after_banner) {
                con->out_kvec_left = 0;
                con->out_kvec_bytes = 0;
        }
        con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
        con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
        con->out_kvec_left++;
        con->out_kvec_bytes += sizeof(con->out_connect);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);

        prepare_connect_authorizer(con);
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
        int ret;

        dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
        while (con->out_kvec_bytes > 0) {
                ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
                                       con->out_kvec_left, con->out_kvec_bytes,
                                       con->out_more);
                if (ret <= 0)
                        goto out;
                con->out_kvec_bytes -= ret;
                if (con->out_kvec_bytes == 0)
                        break;            /* done */
                while (ret > 0) {
                        if (ret >= con->out_kvec_cur->iov_len) {
                                ret -= con->out_kvec_cur->iov_len;
                                con->out_kvec_cur++;
                                con->out_kvec_left--;
                        } else {
                                con->out_kvec_cur->iov_len -= ret;
                                con->out_kvec_cur->iov_base += ret;
                                ret = 0;
                        }
                }
        }
        con->out_kvec_left = 0;
        con->out_kvec_is_msg = false;
        ret = 1;
out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
             con->out_kvec_bytes, con->out_kvec_left, ret);
        return ret;  /* done! */
}
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
        if (!bio) {
                *iter = NULL;
                *seg = 0;
                return;
        }
        *iter = bio;
        *seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
        if (*bio_iter == NULL)
                return;

        BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

        (*seg)++;
        if (*seg == (*bio_iter)->bi_vcnt)
                init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
753 * Write as much message data payload as we can. If we finish, queue
755 * 1 -> done, footer is now queued in out_kvec[].
756 * 0 -> socket full, but more to do
759 static int write_partial_msg_pages(struct ceph_connection
*con
)
761 struct ceph_msg
*msg
= con
->out_msg
;
762 unsigned data_len
= le32_to_cpu(msg
->hdr
.data_len
);
764 int crc
= con
->msgr
->nocrc
;
768 size_t trail_len
= (msg
->trail
? msg
->trail
->length
: 0);
770 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
771 con
, con
->out_msg
, con
->out_msg_pos
.page
, con
->out_msg
->nr_pages
,
772 con
->out_msg_pos
.page_pos
);
775 if (msg
->bio
&& !msg
->bio_iter
)
776 init_bio_iter(msg
->bio
, &msg
->bio_iter
, &msg
->bio_seg
);
779 while (data_len
> con
->out_msg_pos
.data_pos
) {
780 struct page
*page
= NULL
;
782 int max_write
= PAGE_SIZE
;
785 total_max_write
= data_len
- trail_len
-
786 con
->out_msg_pos
.data_pos
;
789 * if we are calculating the data crc (the default), we need
790 * to map the page. if our pages[] has been revoked, use the
794 /* have we reached the trail part of the data? */
795 if (con
->out_msg_pos
.data_pos
>= data_len
- trail_len
) {
798 total_max_write
= data_len
- con
->out_msg_pos
.data_pos
;
800 page
= list_first_entry(&msg
->trail
->head
,
804 max_write
= PAGE_SIZE
;
805 } else if (msg
->pages
) {
806 page
= msg
->pages
[con
->out_msg_pos
.page
];
809 } else if (msg
->pagelist
) {
810 page
= list_first_entry(&msg
->pagelist
->head
,
815 } else if (msg
->bio
) {
818 bv
= bio_iovec_idx(msg
->bio_iter
, msg
->bio_seg
);
820 page_shift
= bv
->bv_offset
;
822 kaddr
= kmap(page
) + page_shift
;
823 max_write
= bv
->bv_len
;
826 page
= con
->msgr
->zero_page
;
828 kaddr
= page_address(con
->msgr
->zero_page
);
830 len
= min_t(int, max_write
- con
->out_msg_pos
.page_pos
,
833 if (crc
&& !con
->out_msg_pos
.did_page_crc
) {
834 void *base
= kaddr
+ con
->out_msg_pos
.page_pos
;
835 u32 tmpcrc
= le32_to_cpu(con
->out_msg
->footer
.data_crc
);
837 BUG_ON(kaddr
== NULL
);
838 con
->out_msg
->footer
.data_crc
=
839 cpu_to_le32(crc32c(tmpcrc
, base
, len
));
840 con
->out_msg_pos
.did_page_crc
= 1;
842 ret
= kernel_sendpage(con
->sock
, page
,
843 con
->out_msg_pos
.page_pos
+ page_shift
,
845 MSG_DONTWAIT
| MSG_NOSIGNAL
|
849 (msg
->pages
|| msg
->pagelist
|| msg
->bio
|| in_trail
))
855 con
->out_msg_pos
.data_pos
+= ret
;
856 con
->out_msg_pos
.page_pos
+= ret
;
858 con
->out_msg_pos
.page_pos
= 0;
859 con
->out_msg_pos
.page
++;
860 con
->out_msg_pos
.did_page_crc
= 0;
862 list_move_tail(&page
->lru
,
864 else if (msg
->pagelist
)
865 list_move_tail(&page
->lru
,
866 &msg
->pagelist
->head
);
869 iter_bio_next(&msg
->bio_iter
, &msg
->bio_seg
);
874 dout("write_partial_msg_pages %p msg %p done\n", con
, msg
);
876 /* prepare and queue up footer, too */
878 con
->out_msg
->footer
.flags
|= CEPH_MSG_FOOTER_NOCRC
;
879 con
->out_kvec_bytes
= 0;
880 con
->out_kvec_left
= 0;
881 con
->out_kvec_cur
= con
->out_kvec
;
882 prepare_write_message_footer(con
, 0);
static int write_partial_skip(struct ceph_connection *con)
{
        int ret;

        while (con->out_skip > 0) {
                struct kvec iov = {
                        .iov_base = page_address(con->msgr->zero_page),
                        .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
                };

                ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
                if (ret <= 0)
                        goto out;
                con->out_skip -= ret;
        }
        ret = 1;
out:
        return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
        dout("prepare_read_banner %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
        dout("prepare_read_connect %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
        dout("prepare_read_ack %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
        dout("prepare_read_tag %p\n", con);
        con->in_base_pos = 0;
        con->in_tag = CEPH_MSGR_TAG_READY;
}
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
        dout("prepare_read_message %p\n", con);
        BUG_ON(con->in_msg != NULL);
        con->in_base_pos = 0;
        con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
        return 0;
}
static int read_partial(struct ceph_connection *con,
                        int *to, int size, void *object)
{
        *to += size;
        while (con->in_base_pos < *to) {
                int left = *to - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        return 1;
}
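/*
 * Example: read_partial_banner() calls this back to back with a running
 * *to, so "have = size - left" is the offset already consumed within the
 * current object when an earlier call stopped short.
 */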
969 * Read all or part of the connect-side handshake on a new connection
971 static int read_partial_banner(struct ceph_connection
*con
)
975 dout("read_partial_banner %p at %d\n", con
, con
->in_base_pos
);
978 ret
= read_partial(con
, &to
, strlen(CEPH_BANNER
), con
->in_banner
);
981 ret
= read_partial(con
, &to
, sizeof(con
->actual_peer_addr
),
982 &con
->actual_peer_addr
);
985 ret
= read_partial(con
, &to
, sizeof(con
->peer_addr_for_me
),
986 &con
->peer_addr_for_me
);
993 static int read_partial_connect(struct ceph_connection
*con
)
997 dout("read_partial_connect %p at %d\n", con
, con
->in_base_pos
);
999 ret
= read_partial(con
, &to
, sizeof(con
->in_reply
), &con
->in_reply
);
1002 ret
= read_partial(con
, &to
, le32_to_cpu(con
->in_reply
.authorizer_len
),
1003 con
->auth_reply_buf
);
1007 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1008 con
, (int)con
->in_reply
.tag
,
1009 le32_to_cpu(con
->in_reply
.connect_seq
),
1010 le32_to_cpu(con
->in_reply
.global_seq
));
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
        if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
                pr_err("connect to %s got bad banner\n",
                       ceph_pr_addr(&con->peer_addr.in_addr));
                con->error_msg = "protocol error, bad banner";
                return -1;
        }
        return 0;
}
static bool addr_is_blank(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
        case AF_INET6:
                return
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
        }
        return false;
}
static int addr_port(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)ss)->sin_port);
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
        }
        return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
        switch (ss->ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)ss)->sin_port = htons(p);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
                break;
        }
}
1067 * Parse an ip[:port] list into an addr array. Use the default
1068 * monitor port if a port isn't specified.
1070 int ceph_parse_ips(const char *c
, const char *end
,
1071 struct ceph_entity_addr
*addr
,
1072 int max_count
, int *count
)
1077 dout("parse_ips on '%.*s'\n", (int)(end
-c
), c
);
1078 for (i
= 0; i
< max_count
; i
++) {
1080 struct sockaddr_storage
*ss
= &addr
[i
].in_addr
;
1081 struct sockaddr_in
*in4
= (void *)ss
;
1082 struct sockaddr_in6
*in6
= (void *)ss
;
1091 memset(ss
, 0, sizeof(*ss
));
1092 if (in4_pton(p
, end
- p
, (u8
*)&in4
->sin_addr
.s_addr
,
1094 ss
->ss_family
= AF_INET
;
1095 else if (in6_pton(p
, end
- p
, (u8
*)&in6
->sin6_addr
.s6_addr
,
1097 ss
->ss_family
= AF_INET6
;
1104 dout("missing matching ']'\n");
1111 if (p
< end
&& *p
== ':') {
1114 while (p
< end
&& *p
>= '0' && *p
<= '9') {
1115 port
= (port
* 10) + (*p
- '0');
1118 if (port
> 65535 || port
== 0)
1121 port
= CEPH_MON_PORT
;
                addr_set_port(ss, port);

                dout("parse_ips got %s\n", ceph_pr_addr(ss));
        }

        return 0;

bad:
        pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return -EINVAL;
}
EXPORT_SYMBOL(ceph_parse_ips);
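/*
 * Expected behaviour, roughly: "1.2.3.4:6789,[::1]" yields two entries,
 * 1.2.3.4 with port 6789 and ::1 with CEPH_MON_PORT filled in as the
 * default.
 */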
1148 static int process_banner(struct ceph_connection
*con
)
1150 dout("process_banner on %p\n", con
);
1152 if (verify_hello(con
) < 0)
1155 ceph_decode_addr(&con
->actual_peer_addr
);
1156 ceph_decode_addr(&con
->peer_addr_for_me
);
1159 * Make sure the other end is who we wanted. note that the other
1160 * end may not yet know their ip address, so if it's 0.0.0.0, give
1161 * them the benefit of the doubt.
1163 if (memcmp(&con
->peer_addr
, &con
->actual_peer_addr
,
1164 sizeof(con
->peer_addr
)) != 0 &&
1165 !(addr_is_blank(&con
->actual_peer_addr
.in_addr
) &&
1166 con
->actual_peer_addr
.nonce
== con
->peer_addr
.nonce
)) {
1167 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1168 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1169 (int)le32_to_cpu(con
->peer_addr
.nonce
),
1170 ceph_pr_addr(&con
->actual_peer_addr
.in_addr
),
1171 (int)le32_to_cpu(con
->actual_peer_addr
.nonce
));
1172 con
->error_msg
= "wrong peer at address";
1177 * did we learn our address?
1179 if (addr_is_blank(&con
->msgr
->inst
.addr
.in_addr
)) {
1180 int port
= addr_port(&con
->msgr
->inst
.addr
.in_addr
);
1182 memcpy(&con
->msgr
->inst
.addr
.in_addr
,
1183 &con
->peer_addr_for_me
.in_addr
,
1184 sizeof(con
->peer_addr_for_me
.in_addr
));
1185 addr_set_port(&con
->msgr
->inst
.addr
.in_addr
, port
);
1186 encode_my_addr(con
->msgr
);
1187 dout("process_banner learned my addr is %s\n",
1188 ceph_pr_addr(&con
->msgr
->inst
.addr
.in_addr
));
1191 set_bit(NEGOTIATING
, &con
->state
);
1192 prepare_read_connect(con
);
static void fail_protocol(struct ceph_connection *con)
{
        reset_connection(con);
        set_bit(CLOSED, &con->state);  /* in case there's queued work */

        mutex_unlock(&con->mutex);
        if (con->ops->bad_proto)
                con->ops->bad_proto(con);
        mutex_lock(&con->mutex);
}
1207 static int process_connect(struct ceph_connection
*con
)
1209 u64 sup_feat
= con
->msgr
->supported_features
;
1210 u64 req_feat
= con
->msgr
->required_features
;
1211 u64 server_feat
= le64_to_cpu(con
->in_reply
.features
);
1213 dout("process_connect on %p tag %d\n", con
, (int)con
->in_tag
);
1215 switch (con
->in_reply
.tag
) {
1216 case CEPH_MSGR_TAG_FEATURES
:
1217 pr_err("%s%lld %s feature set mismatch,"
1218 " my %llx < server's %llx, missing %llx\n",
1219 ENTITY_NAME(con
->peer_name
),
1220 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1221 sup_feat
, server_feat
, server_feat
& ~sup_feat
);
1222 con
->error_msg
= "missing required protocol features";
1226 case CEPH_MSGR_TAG_BADPROTOVER
:
1227 pr_err("%s%lld %s protocol version mismatch,"
1228 " my %d != server's %d\n",
1229 ENTITY_NAME(con
->peer_name
),
1230 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1231 le32_to_cpu(con
->out_connect
.protocol_version
),
1232 le32_to_cpu(con
->in_reply
.protocol_version
));
1233 con
->error_msg
= "protocol version mismatch";
1237 case CEPH_MSGR_TAG_BADAUTHORIZER
:
1239 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con
,
1241 if (con
->auth_retry
== 2) {
1242 con
->error_msg
= "connect authorization failure";
1243 reset_connection(con
);
1244 set_bit(CLOSED
, &con
->state
);
1247 con
->auth_retry
= 1;
1248 prepare_write_connect(con
->msgr
, con
, 0);
1249 prepare_read_connect(con
);
1252 case CEPH_MSGR_TAG_RESETSESSION
:
1254 * If we connected with a large connect_seq but the peer
1255 * has no record of a session with us (no connection, or
1256 * connect_seq == 0), they will send RESETSESION to indicate
1257 * that they must have reset their session, and may have
1260 dout("process_connect got RESET peer seq %u\n",
1261 le32_to_cpu(con
->in_connect
.connect_seq
));
1262 pr_err("%s%lld %s connection reset\n",
1263 ENTITY_NAME(con
->peer_name
),
1264 ceph_pr_addr(&con
->peer_addr
.in_addr
));
1265 reset_connection(con
);
1266 prepare_write_connect(con
->msgr
, con
, 0);
1267 prepare_read_connect(con
);
1269 /* Tell ceph about it. */
1270 mutex_unlock(&con
->mutex
);
1271 pr_info("reset on %s%lld\n", ENTITY_NAME(con
->peer_name
));
1272 if (con
->ops
->peer_reset
)
1273 con
->ops
->peer_reset(con
);
1274 mutex_lock(&con
->mutex
);
1277 case CEPH_MSGR_TAG_RETRY_SESSION
:
1279 * If we sent a smaller connect_seq than the peer has, try
1280 * again with a larger value.
1282 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
1283 le32_to_cpu(con
->out_connect
.connect_seq
),
1284 le32_to_cpu(con
->in_connect
.connect_seq
));
1285 con
->connect_seq
= le32_to_cpu(con
->in_connect
.connect_seq
);
1286 prepare_write_connect(con
->msgr
, con
, 0);
1287 prepare_read_connect(con
);
1290 case CEPH_MSGR_TAG_RETRY_GLOBAL
:
1292 * If we sent a smaller global_seq than the peer has, try
1293 * again with a larger value.
1295 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1296 con
->peer_global_seq
,
1297 le32_to_cpu(con
->in_connect
.global_seq
));
1298 get_global_seq(con
->msgr
,
1299 le32_to_cpu(con
->in_connect
.global_seq
));
1300 prepare_write_connect(con
->msgr
, con
, 0);
1301 prepare_read_connect(con
);
1304 case CEPH_MSGR_TAG_READY
:
1305 if (req_feat
& ~server_feat
) {
1306 pr_err("%s%lld %s protocol feature mismatch,"
1307 " my required %llx > server's %llx, need %llx\n",
1308 ENTITY_NAME(con
->peer_name
),
1309 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1310 req_feat
, server_feat
, req_feat
& ~server_feat
);
1311 con
->error_msg
= "missing required protocol features";
1315 clear_bit(CONNECTING
, &con
->state
);
1316 con
->peer_global_seq
= le32_to_cpu(con
->in_reply
.global_seq
);
1318 con
->peer_features
= server_feat
;
1319 dout("process_connect got READY gseq %d cseq %d (%d)\n",
1320 con
->peer_global_seq
,
1321 le32_to_cpu(con
->in_reply
.connect_seq
),
1323 WARN_ON(con
->connect_seq
!=
1324 le32_to_cpu(con
->in_reply
.connect_seq
));
1326 if (con
->in_reply
.flags
& CEPH_MSG_CONNECT_LOSSY
)
1327 set_bit(LOSSYTX
, &con
->state
);
1329 prepare_read_tag(con
);
1332 case CEPH_MSGR_TAG_WAIT
:
1334 * If there is a connection race (we are opening
1335 * connections to each other), one of us may just have
1336 * to WAIT. This shouldn't happen if we are the
1339 pr_err("process_connect peer connecting WAIT\n");
1342 pr_err("connect protocol error, will retry\n");
1343 con
->error_msg
= "protocol error, garbage tag during connect";
1351 * read (part of) an ack
1353 static int read_partial_ack(struct ceph_connection
*con
)
1357 return read_partial(con
, &to
, sizeof(con
->in_temp_ack
),
1363 * We can finally discard anything that's been acked.
1365 static void process_ack(struct ceph_connection
*con
)
1368 u64 ack
= le64_to_cpu(con
->in_temp_ack
);
1371 while (!list_empty(&con
->out_sent
)) {
1372 m
= list_first_entry(&con
->out_sent
, struct ceph_msg
,
1374 seq
= le64_to_cpu(m
->hdr
.seq
);
1377 dout("got ack for seq %llu type %d at %p\n", seq
,
1378 le16_to_cpu(m
->hdr
.type
), m
);
1381 prepare_read_tag(con
);
1387 static int read_partial_message_section(struct ceph_connection
*con
,
1388 struct kvec
*section
,
1389 unsigned int sec_len
, u32
*crc
)
1395 while (section
->iov_len
< sec_len
) {
1396 BUG_ON(section
->iov_base
== NULL
);
1397 left
= sec_len
- section
->iov_len
;
1398 ret
= ceph_tcp_recvmsg(con
->sock
, (char *)section
->iov_base
+
1399 section
->iov_len
, left
);
1402 section
->iov_len
+= ret
;
1403 if (section
->iov_len
== sec_len
)
1404 *crc
= crc32c(0, section
->iov_base
,
1411 static struct ceph_msg
*ceph_alloc_msg(struct ceph_connection
*con
,
1412 struct ceph_msg_header
*hdr
,
1416 static int read_partial_message_pages(struct ceph_connection
*con
,
1417 struct page
**pages
,
1418 unsigned data_len
, int datacrc
)
1424 left
= min((int)(data_len
- con
->in_msg_pos
.data_pos
),
1425 (int)(PAGE_SIZE
- con
->in_msg_pos
.page_pos
));
1427 BUG_ON(pages
== NULL
);
1428 p
= kmap(pages
[con
->in_msg_pos
.page
]);
1429 ret
= ceph_tcp_recvmsg(con
->sock
, p
+ con
->in_msg_pos
.page_pos
,
1431 if (ret
> 0 && datacrc
)
1433 crc32c(con
->in_data_crc
,
1434 p
+ con
->in_msg_pos
.page_pos
, ret
);
1435 kunmap(pages
[con
->in_msg_pos
.page
]);
1438 con
->in_msg_pos
.data_pos
+= ret
;
1439 con
->in_msg_pos
.page_pos
+= ret
;
1440 if (con
->in_msg_pos
.page_pos
== PAGE_SIZE
) {
1441 con
->in_msg_pos
.page_pos
= 0;
1442 con
->in_msg_pos
.page
++;
1449 static int read_partial_message_bio(struct ceph_connection
*con
,
1450 struct bio
**bio_iter
, int *bio_seg
,
1451 unsigned data_len
, int datacrc
)
1453 struct bio_vec
*bv
= bio_iovec_idx(*bio_iter
, *bio_seg
);
1460 left
= min((int)(data_len
- con
->in_msg_pos
.data_pos
),
1461 (int)(bv
->bv_len
- con
->in_msg_pos
.page_pos
));
1463 p
= kmap(bv
->bv_page
) + bv
->bv_offset
;
1465 ret
= ceph_tcp_recvmsg(con
->sock
, p
+ con
->in_msg_pos
.page_pos
,
1467 if (ret
> 0 && datacrc
)
1469 crc32c(con
->in_data_crc
,
1470 p
+ con
->in_msg_pos
.page_pos
, ret
);
1471 kunmap(bv
->bv_page
);
1474 con
->in_msg_pos
.data_pos
+= ret
;
1475 con
->in_msg_pos
.page_pos
+= ret
;
1476 if (con
->in_msg_pos
.page_pos
== bv
->bv_len
) {
1477 con
->in_msg_pos
.page_pos
= 0;
1478 iter_bio_next(bio_iter
, bio_seg
);
1486 * read (part of) a message.
1488 static int read_partial_message(struct ceph_connection
*con
)
1490 struct ceph_msg
*m
= con
->in_msg
;
1493 unsigned front_len
, middle_len
, data_len
;
1494 int datacrc
= con
->msgr
->nocrc
;
1498 dout("read_partial_message con %p msg %p\n", con
, m
);
1501 while (con
->in_base_pos
< sizeof(con
->in_hdr
)) {
1502 left
= sizeof(con
->in_hdr
) - con
->in_base_pos
;
1503 ret
= ceph_tcp_recvmsg(con
->sock
,
1504 (char *)&con
->in_hdr
+ con
->in_base_pos
,
1508 con
->in_base_pos
+= ret
;
1509 if (con
->in_base_pos
== sizeof(con
->in_hdr
)) {
1510 u32 crc
= crc32c(0, (void *)&con
->in_hdr
,
1511 sizeof(con
->in_hdr
) - sizeof(con
->in_hdr
.crc
));
1512 if (crc
!= le32_to_cpu(con
->in_hdr
.crc
)) {
1513 pr_err("read_partial_message bad hdr "
1514 " crc %u != expected %u\n",
1515 crc
, con
->in_hdr
.crc
);
1520 front_len
= le32_to_cpu(con
->in_hdr
.front_len
);
1521 if (front_len
> CEPH_MSG_MAX_FRONT_LEN
)
1523 middle_len
= le32_to_cpu(con
->in_hdr
.middle_len
);
1524 if (middle_len
> CEPH_MSG_MAX_DATA_LEN
)
1526 data_len
= le32_to_cpu(con
->in_hdr
.data_len
);
1527 if (data_len
> CEPH_MSG_MAX_DATA_LEN
)
1531 seq
= le64_to_cpu(con
->in_hdr
.seq
);
1532 if ((s64
)seq
- (s64
)con
->in_seq
< 1) {
1533 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
1534 ENTITY_NAME(con
->peer_name
),
1535 ceph_pr_addr(&con
->peer_addr
.in_addr
),
1536 seq
, con
->in_seq
+ 1);
1537 con
->in_base_pos
= -front_len
- middle_len
- data_len
-
1539 con
->in_tag
= CEPH_MSGR_TAG_READY
;
1541 } else if ((s64
)seq
- (s64
)con
->in_seq
> 1) {
1542 pr_err("read_partial_message bad seq %lld expected %lld\n",
1543 seq
, con
->in_seq
+ 1);
1544 con
->error_msg
= "bad message sequence # for incoming message";
1548 /* allocate message? */
1550 dout("got hdr type %d front %d data %d\n", con
->in_hdr
.type
,
1551 con
->in_hdr
.front_len
, con
->in_hdr
.data_len
);
1553 con
->in_msg
= ceph_alloc_msg(con
, &con
->in_hdr
, &skip
);
1555 /* skip this message */
1556 dout("alloc_msg said skip message\n");
1557 BUG_ON(con
->in_msg
);
1558 con
->in_base_pos
= -front_len
- middle_len
- data_len
-
1560 con
->in_tag
= CEPH_MSGR_TAG_READY
;
1566 "error allocating memory for incoming message";
1570 m
->front
.iov_len
= 0; /* haven't read it yet */
1572 m
->middle
->vec
.iov_len
= 0;
1574 con
->in_msg_pos
.page
= 0;
1576 con
->in_msg_pos
.page_pos
= m
->page_alignment
;
1578 con
->in_msg_pos
.page_pos
= 0;
1579 con
->in_msg_pos
.data_pos
= 0;
1583 ret
= read_partial_message_section(con
, &m
->front
, front_len
,
1584 &con
->in_front_crc
);
1590 ret
= read_partial_message_section(con
, &m
->middle
->vec
,
1592 &con
->in_middle_crc
);
1597 if (m
->bio
&& !m
->bio_iter
)
1598 init_bio_iter(m
->bio
, &m
->bio_iter
, &m
->bio_seg
);
1602 while (con
->in_msg_pos
.data_pos
< data_len
) {
1604 ret
= read_partial_message_pages(con
, m
->pages
,
1609 } else if (m
->bio
) {
1611 ret
= read_partial_message_bio(con
,
1612 &m
->bio_iter
, &m
->bio_seg
,
1623 to
= sizeof(m
->hdr
) + sizeof(m
->footer
);
1624 while (con
->in_base_pos
< to
) {
1625 left
= to
- con
->in_base_pos
;
1626 ret
= ceph_tcp_recvmsg(con
->sock
, (char *)&m
->footer
+
1627 (con
->in_base_pos
- sizeof(m
->hdr
)),
1631 con
->in_base_pos
+= ret
;
1633 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1634 m
, front_len
, m
->footer
.front_crc
, middle_len
,
1635 m
->footer
.middle_crc
, data_len
, m
->footer
.data_crc
);
1638 if (con
->in_front_crc
!= le32_to_cpu(m
->footer
.front_crc
)) {
1639 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1640 m
, con
->in_front_crc
, m
->footer
.front_crc
);
1643 if (con
->in_middle_crc
!= le32_to_cpu(m
->footer
.middle_crc
)) {
1644 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1645 m
, con
->in_middle_crc
, m
->footer
.middle_crc
);
1649 (m
->footer
.flags
& CEPH_MSG_FOOTER_NOCRC
) == 0 &&
1650 con
->in_data_crc
!= le32_to_cpu(m
->footer
.data_crc
)) {
1651 pr_err("read_partial_message %p data crc %u != exp. %u\n", m
,
1652 con
->in_data_crc
, le32_to_cpu(m
->footer
.data_crc
));
1656 return 1; /* done! */
1660 * Process message. This happens in the worker thread. The callback should
1661 * be careful not to do anything that waits on other incoming messages or it
1664 static void process_message(struct ceph_connection
*con
)
1666 struct ceph_msg
*msg
;
1671 /* if first message, set peer_name */
1672 if (con
->peer_name
.type
== 0)
1673 con
->peer_name
= msg
->hdr
.src
;
1676 mutex_unlock(&con
->mutex
);
1678 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1679 msg
, le64_to_cpu(msg
->hdr
.seq
),
1680 ENTITY_NAME(msg
->hdr
.src
),
1681 le16_to_cpu(msg
->hdr
.type
),
1682 ceph_msg_type_name(le16_to_cpu(msg
->hdr
.type
)),
1683 le32_to_cpu(msg
->hdr
.front_len
),
1684 le32_to_cpu(msg
->hdr
.data_len
),
1685 con
->in_front_crc
, con
->in_middle_crc
, con
->in_data_crc
);
1686 con
->ops
->dispatch(con
, msg
);
1688 mutex_lock(&con
->mutex
);
1689 prepare_read_tag(con
);
1694 * Write something to the socket. Called in a worker thread when the
1695 * socket appears to be writeable and we have something ready to send.
1697 static int try_write(struct ceph_connection
*con
)
1699 struct ceph_messenger
*msgr
= con
->msgr
;
1702 dout("try_write start %p state %lu nref %d\n", con
, con
->state
,
1703 atomic_read(&con
->nref
));
1706 dout("try_write out_kvec_bytes %d\n", con
->out_kvec_bytes
);
1708 /* open the socket first? */
1709 if (con
->sock
== NULL
) {
1711 * if we were STANDBY and are reconnecting _this_
1712 * connection, bump connect_seq now. Always bump
1715 if (test_and_clear_bit(STANDBY
, &con
->state
))
1718 prepare_write_banner(msgr
, con
);
1719 prepare_write_connect(msgr
, con
, 1);
1720 prepare_read_banner(con
);
1721 set_bit(CONNECTING
, &con
->state
);
1722 clear_bit(NEGOTIATING
, &con
->state
);
1724 BUG_ON(con
->in_msg
);
1725 con
->in_tag
= CEPH_MSGR_TAG_READY
;
1726 dout("try_write initiating connect on %p new state %lu\n",
1728 con
->sock
= ceph_tcp_connect(con
);
1729 if (IS_ERR(con
->sock
)) {
1731 con
->error_msg
= "connect error";
1738 /* kvec data queued? */
1739 if (con
->out_skip
) {
1740 ret
= write_partial_skip(con
);
1744 dout("try_write write_partial_skip err %d\n", ret
);
1748 if (con
->out_kvec_left
) {
1749 ret
= write_partial_kvec(con
);
1756 if (con
->out_msg_done
) {
1757 ceph_msg_put(con
->out_msg
);
1758 con
->out_msg
= NULL
; /* we're done with this one */
1762 ret
= write_partial_msg_pages(con
);
1764 goto more_kvec
; /* we need to send the footer, too! */
1768 dout("try_write write_partial_msg_pages err %d\n",
1775 if (!test_bit(CONNECTING
, &con
->state
)) {
1776 /* is anything else pending? */
1777 if (!list_empty(&con
->out_queue
)) {
1778 prepare_write_message(con
);
1781 if (con
->in_seq
> con
->in_seq_acked
) {
1782 prepare_write_ack(con
);
1785 if (test_and_clear_bit(KEEPALIVE_PENDING
, &con
->state
)) {
1786 prepare_write_keepalive(con
);
1791 /* Nothing to do! */
1792 clear_bit(WRITE_PENDING
, &con
->state
);
1793 dout("try_write nothing else to write.\n");
1797 dout("try_write done on %p\n", con
);
1804 * Read what we can from the socket.
1806 static int try_read(struct ceph_connection
*con
)
1813 if (test_bit(STANDBY
, &con
->state
))
1816 dout("try_read start on %p\n", con
);
1819 dout("try_read tag %d in_base_pos %d\n", (int)con
->in_tag
,
1821 if (test_bit(CONNECTING
, &con
->state
)) {
1822 if (!test_bit(NEGOTIATING
, &con
->state
)) {
1823 dout("try_read connecting\n");
1824 ret
= read_partial_banner(con
);
1827 if (process_banner(con
) < 0) {
1832 ret
= read_partial_connect(con
);
1835 if (process_connect(con
) < 0) {
1842 if (con
->in_base_pos
< 0) {
1844 * skipping + discarding content.
1846 * FIXME: there must be a better way to do this!
1848 static char buf
[1024];
1849 int skip
= min(1024, -con
->in_base_pos
);
1850 dout("skipping %d / %d bytes\n", skip
, -con
->in_base_pos
);
1851 ret
= ceph_tcp_recvmsg(con
->sock
, buf
, skip
);
1854 con
->in_base_pos
+= ret
;
1855 if (con
->in_base_pos
)
1858 if (con
->in_tag
== CEPH_MSGR_TAG_READY
) {
1862 ret
= ceph_tcp_recvmsg(con
->sock
, &con
->in_tag
, 1);
1865 dout("try_read got tag %d\n", (int)con
->in_tag
);
1866 switch (con
->in_tag
) {
1867 case CEPH_MSGR_TAG_MSG
:
1868 prepare_read_message(con
);
1870 case CEPH_MSGR_TAG_ACK
:
1871 prepare_read_ack(con
);
1873 case CEPH_MSGR_TAG_CLOSE
:
1874 set_bit(CLOSED
, &con
->state
); /* fixme */
1880 if (con
->in_tag
== CEPH_MSGR_TAG_MSG
) {
1881 ret
= read_partial_message(con
);
1885 con
->error_msg
= "bad crc";
1889 con
->error_msg
= "io error";
1895 if (con
->in_tag
== CEPH_MSGR_TAG_READY
)
1897 process_message(con
);
1900 if (con
->in_tag
== CEPH_MSGR_TAG_ACK
) {
1901 ret
= read_partial_ack(con
);
1911 dout("try_read done on %p\n", con
);
1915 pr_err("try_read bad con->in_tag = %d\n", (int)con
->in_tag
);
1916 con
->error_msg
= "protocol error, garbage tag";
1923 * Atomically queue work on a connection. Bump @con reference to
1924 * avoid races with connection teardown.
1926 * There is some trickery going on with QUEUED and BUSY because we
1927 * only want a _single_ thread operating on each connection at any
1928 * point in time, but we want to use all available CPUs.
1930 * The worker thread only proceeds if it can atomically set BUSY. It
1931 * clears QUEUED and does it's thing. When it thinks it's done, it
1932 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
1933 * (tries again to set BUSY).
1935 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
1936 * try to queue work. If that fails (work is already queued, or BUSY)
1937 * we give up (work also already being done or is queued) but leave QUEUED
1938 * set so that the worker thread will loop if necessary.
1940 static void queue_con(struct ceph_connection
*con
)
1942 if (test_bit(DEAD
, &con
->state
)) {
1943 dout("queue_con %p ignoring: DEAD\n",
1948 if (!con
->ops
->get(con
)) {
1949 dout("queue_con %p ref count 0\n", con
);
1953 set_bit(QUEUED
, &con
->state
);
1954 if (test_bit(BUSY
, &con
->state
)) {
1955 dout("queue_con %p - already BUSY\n", con
);
1957 } else if (!queue_work(ceph_msgr_wq
, &con
->work
.work
)) {
1958 dout("queue_con %p - already queued\n", con
);
1961 dout("queue_con %p\n", con
);
1966 * Do some work on a connection. Drop a connection ref when we're done.
1968 static void con_work(struct work_struct
*work
)
1970 struct ceph_connection
*con
= container_of(work
, struct ceph_connection
,
1975 if (test_and_set_bit(BUSY
, &con
->state
) != 0) {
1976 dout("con_work %p BUSY already set\n", con
);
1979 dout("con_work %p start, clearing QUEUED\n", con
);
1980 clear_bit(QUEUED
, &con
->state
);
1982 mutex_lock(&con
->mutex
);
1984 if (test_bit(CLOSED
, &con
->state
)) { /* e.g. if we are replaced */
1985 dout("con_work CLOSED\n");
1986 con_close_socket(con
);
1989 if (test_and_clear_bit(OPENING
, &con
->state
)) {
1990 /* reopen w/ new peer */
1991 dout("con_work OPENING\n");
1992 con_close_socket(con
);
1995 if (test_and_clear_bit(SOCK_CLOSED
, &con
->state
) ||
1996 try_read(con
) < 0 ||
1997 try_write(con
) < 0) {
1998 mutex_unlock(&con
->mutex
);
2000 ceph_fault(con
); /* error/fault path */
2005 mutex_unlock(&con
->mutex
);
2008 clear_bit(BUSY
, &con
->state
);
2009 dout("con->state=%lu\n", con
->state
);
2010 if (test_bit(QUEUED
, &con
->state
)) {
2011 if (!backoff
|| test_bit(OPENING
, &con
->state
)) {
2012 dout("con_work %p QUEUED reset, looping\n", con
);
2015 dout("con_work %p QUEUED reset, but just faulted\n", con
);
2016 clear_bit(QUEUED
, &con
->state
);
2018 dout("con_work %p done\n", con
);
2026 * Generic error/fault handler. A retry mechanism is used with
2027 * exponential backoff
2029 static void ceph_fault(struct ceph_connection
*con
)
2031 pr_err("%s%lld %s %s\n", ENTITY_NAME(con
->peer_name
),
2032 ceph_pr_addr(&con
->peer_addr
.in_addr
), con
->error_msg
);
2033 dout("fault %p state %lu to peer %s\n",
2034 con
, con
->state
, ceph_pr_addr(&con
->peer_addr
.in_addr
));
2036 if (test_bit(LOSSYTX
, &con
->state
)) {
2037 dout("fault on LOSSYTX channel\n");
2041 mutex_lock(&con
->mutex
);
2042 if (test_bit(CLOSED
, &con
->state
))
2045 con_close_socket(con
);
2048 ceph_msg_put(con
->in_msg
);
2052 /* Requeue anything that hasn't been acked */
2053 list_splice_init(&con
->out_sent
, &con
->out_queue
);
2055 /* If there are no messages in the queue, place the connection
2056 * in a STANDBY state (i.e., don't try to reconnect just yet). */
2057 if (list_empty(&con
->out_queue
) && !con
->out_keepalive_pending
) {
2058 dout("fault setting STANDBY\n");
2059 set_bit(STANDBY
, &con
->state
);
2061 /* retry after a delay. */
2062 if (con
->delay
== 0)
2063 con
->delay
= BASE_DELAY_INTERVAL
;
2064 else if (con
->delay
< MAX_DELAY_INTERVAL
)
2066 dout("fault queueing %p delay %lu\n", con
, con
->delay
);
2068 if (queue_delayed_work(ceph_msgr_wq
, &con
->work
,
2069 round_jiffies_relative(con
->delay
)) == 0)
2074 mutex_unlock(&con
->mutex
);
2077 * in case we faulted due to authentication, invalidate our
2078 * current tickets so that we can get new ones.
2080 if (con
->auth_retry
&& con
->ops
->invalidate_authorizer
) {
2081 dout("calling invalidate_authorizer()\n");
2082 con
->ops
->invalidate_authorizer(con
);
2085 if (con
->ops
->fault
)
2086 con
->ops
->fault(con
);
2092 * create a new messenger instance
2094 struct ceph_messenger
*ceph_messenger_create(struct ceph_entity_addr
*myaddr
,
2095 u32 supported_features
,
2096 u32 required_features
)
2098 struct ceph_messenger
*msgr
;
2100 msgr
= kzalloc(sizeof(*msgr
), GFP_KERNEL
);
2102 return ERR_PTR(-ENOMEM
);
2104 msgr
->supported_features
= supported_features
;
2105 msgr
->required_features
= required_features
;
2107 spin_lock_init(&msgr
->global_seq_lock
);
2109 /* the zero page is needed if a request is "canceled" while the message
2110 * is being written over the socket */
2111 msgr
->zero_page
= __page_cache_alloc(GFP_KERNEL
| __GFP_ZERO
);
2112 if (!msgr
->zero_page
) {
2114 return ERR_PTR(-ENOMEM
);
2116 kmap(msgr
->zero_page
);
2119 msgr
->inst
.addr
= *myaddr
;
2121 /* select a random nonce */
2122 msgr
->inst
.addr
.type
= 0;
2123 get_random_bytes(&msgr
->inst
.addr
.nonce
, sizeof(msgr
->inst
.addr
.nonce
));
2124 encode_my_addr(msgr
);
2126 dout("messenger_create %p\n", msgr
);
2129 EXPORT_SYMBOL(ceph_messenger_create
);
2131 void ceph_messenger_destroy(struct ceph_messenger
*msgr
)
2133 dout("destroy %p\n", msgr
);
2134 kunmap(msgr
->zero_page
);
2135 __free_page(msgr
->zero_page
);
2137 dout("destroyed messenger %p\n", msgr
);
2139 EXPORT_SYMBOL(ceph_messenger_destroy
);
2142 * Queue up an outgoing message on the given connection.
2144 void ceph_con_send(struct ceph_connection
*con
, struct ceph_msg
*msg
)
2146 if (test_bit(CLOSED
, &con
->state
)) {
2147 dout("con_send %p closed, dropping %p\n", con
, msg
);
2153 msg
->hdr
.src
= con
->msgr
->inst
.name
;
2155 BUG_ON(msg
->front
.iov_len
!= le32_to_cpu(msg
->hdr
.front_len
));
2157 msg
->needs_out_seq
= true;
2160 mutex_lock(&con
->mutex
);
2161 BUG_ON(!list_empty(&msg
->list_head
));
2162 list_add_tail(&msg
->list_head
, &con
->out_queue
);
2163 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg
,
2164 ENTITY_NAME(con
->peer_name
), le16_to_cpu(msg
->hdr
.type
),
2165 ceph_msg_type_name(le16_to_cpu(msg
->hdr
.type
)),
2166 le32_to_cpu(msg
->hdr
.front_len
),
2167 le32_to_cpu(msg
->hdr
.middle_len
),
2168 le32_to_cpu(msg
->hdr
.data_len
));
2169 mutex_unlock(&con
->mutex
);
2171 /* if there wasn't anything waiting to send before, queue
2173 if (test_and_set_bit(WRITE_PENDING
, &con
->state
) == 0)
2176 EXPORT_SYMBOL(ceph_con_send
);
2179 * Revoke a message that was previously queued for send
2181 void ceph_con_revoke(struct ceph_connection
*con
, struct ceph_msg
*msg
)
2183 mutex_lock(&con
->mutex
);
2184 if (!list_empty(&msg
->list_head
)) {
2185 dout("con_revoke %p msg %p - was on queue\n", con
, msg
);
2186 list_del_init(&msg
->list_head
);
2190 if (con
->out_msg
== msg
) {
2191 dout("con_revoke %p msg %p - was sending\n", con
, msg
);
2192 con
->out_msg
= NULL
;
2193 if (con
->out_kvec_is_msg
) {
2194 con
->out_skip
= con
->out_kvec_bytes
;
2195 con
->out_kvec_is_msg
= false;
2200 mutex_unlock(&con
->mutex
);
2204 * Revoke a message that we may be reading data into
2206 void ceph_con_revoke_message(struct ceph_connection
*con
, struct ceph_msg
*msg
)
2208 mutex_lock(&con
->mutex
);
2209 if (con
->in_msg
&& con
->in_msg
== msg
) {
2210 unsigned front_len
= le32_to_cpu(con
->in_hdr
.front_len
);
2211 unsigned middle_len
= le32_to_cpu(con
->in_hdr
.middle_len
);
2212 unsigned data_len
= le32_to_cpu(con
->in_hdr
.data_len
);
2214 /* skip rest of message */
2215 dout("con_revoke_pages %p msg %p revoked\n", con
, msg
);
2216 con
->in_base_pos
= con
->in_base_pos
-
2217 sizeof(struct ceph_msg_header
) -
2221 sizeof(struct ceph_msg_footer
);
2222 ceph_msg_put(con
->in_msg
);
2224 con
->in_tag
= CEPH_MSGR_TAG_READY
;
2227 dout("con_revoke_pages %p msg %p pages %p no-op\n",
2228 con
, con
->in_msg
, msg
);
2230 mutex_unlock(&con
->mutex
);
2234 * Queue a keepalive byte to ensure the tcp connection is alive.
2236 void ceph_con_keepalive(struct ceph_connection
*con
)
2238 if (test_and_set_bit(KEEPALIVE_PENDING
, &con
->state
) == 0 &&
2239 test_and_set_bit(WRITE_PENDING
, &con
->state
) == 0)
2242 EXPORT_SYMBOL(ceph_con_keepalive
);
2246 * construct a new message with given type, size
2247 * the new msg has a ref count of 1.
2249 struct ceph_msg
*ceph_msg_new(int type
, int front_len
, gfp_t flags
)
2253 m
= kmalloc(sizeof(*m
), flags
);
2256 kref_init(&m
->kref
);
2257 INIT_LIST_HEAD(&m
->list_head
);
2260 m
->hdr
.type
= cpu_to_le16(type
);
2261 m
->hdr
.priority
= cpu_to_le16(CEPH_MSG_PRIO_DEFAULT
);
2263 m
->hdr
.front_len
= cpu_to_le32(front_len
);
2264 m
->hdr
.middle_len
= 0;
2265 m
->hdr
.data_len
= 0;
2266 m
->hdr
.data_off
= 0;
2267 m
->hdr
.reserved
= 0;
2268 m
->footer
.front_crc
= 0;
2269 m
->footer
.middle_crc
= 0;
2270 m
->footer
.data_crc
= 0;
2271 m
->footer
.flags
= 0;
2272 m
->front_max
= front_len
;
2273 m
->front_is_vmalloc
= false;
2274 m
->more_to_follow
= false;
2279 if (front_len
> PAGE_CACHE_SIZE
) {
2280 m
->front
.iov_base
= __vmalloc(front_len
, flags
,
2282 m
->front_is_vmalloc
= true;
2284 m
->front
.iov_base
= kmalloc(front_len
, flags
);
2286 if (m
->front
.iov_base
== NULL
) {
2287 pr_err("msg_new can't allocate %d bytes\n",
2292 m
->front
.iov_base
= NULL
;
2294 m
->front
.iov_len
= front_len
;
2301 m
->page_alignment
= 0;
2309 dout("ceph_msg_new %p front %d\n", m
, front_len
);
2315 pr_err("msg_new can't create type %d front %d\n", type
, front_len
);
2318 EXPORT_SYMBOL(ceph_msg_new
);
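/*
 * Illustrative only: a typical sender allocates a message, fills the
 * front payload, and queues it on an open connection:
 *
 *	struct ceph_msg *m = ceph_msg_new(type, front_len, GFP_NOFS);
 *	if (!m)
 *		return -ENOMEM;
 *	...fill m->front.iov_base...
 *	ceph_con_send(con, m);
 */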
2321 * Allocate "middle" portion of a message, if it is needed and wasn't
2322 * allocated by alloc_msg. This allows us to read a small fixed-size
2323 * per-type header in the front and then gracefully fail (i.e.,
2324 * propagate the error to the caller based on info in the front) when
2325 * the middle is too large.
2327 static int ceph_alloc_middle(struct ceph_connection
*con
, struct ceph_msg
*msg
)
2329 int type
= le16_to_cpu(msg
->hdr
.type
);
2330 int middle_len
= le32_to_cpu(msg
->hdr
.middle_len
);
2332 dout("alloc_middle %p type %d %s middle_len %d\n", msg
, type
,
2333 ceph_msg_type_name(type
), middle_len
);
2334 BUG_ON(!middle_len
);
2335 BUG_ON(msg
->middle
);
2337 msg
->middle
= ceph_buffer_new(middle_len
, GFP_NOFS
);
2344 * Generic message allocator, for incoming messages.
2346 static struct ceph_msg
*ceph_alloc_msg(struct ceph_connection
*con
,
2347 struct ceph_msg_header
*hdr
,
2350 int type
= le16_to_cpu(hdr
->type
);
2351 int front_len
= le32_to_cpu(hdr
->front_len
);
2352 int middle_len
= le32_to_cpu(hdr
->middle_len
);
2353 struct ceph_msg
*msg
= NULL
;
2356 if (con
->ops
->alloc_msg
) {
2357 mutex_unlock(&con
->mutex
);
2358 msg
= con
->ops
->alloc_msg(con
, hdr
, skip
);
2359 mutex_lock(&con
->mutex
);
2365 msg
= ceph_msg_new(type
, front_len
, GFP_NOFS
);
2367 pr_err("unable to allocate msg type %d len %d\n",
2371 msg
->page_alignment
= le16_to_cpu(hdr
->data_off
);
2373 memcpy(&msg
->hdr
, &con
->in_hdr
, sizeof(con
->in_hdr
));
2375 if (middle_len
&& !msg
->middle
) {
2376 ret
= ceph_alloc_middle(con
, msg
);
2388 * Free a generically kmalloc'd message.
2390 void ceph_msg_kfree(struct ceph_msg
*m
)
2392 dout("msg_kfree %p\n", m
);
2393 if (m
->front_is_vmalloc
)
2394 vfree(m
->front
.iov_base
);
2396 kfree(m
->front
.iov_base
);
2401 * Drop a msg ref. Destroy as needed.
2403 void ceph_msg_last_put(struct kref
*kref
)
2405 struct ceph_msg
*m
= container_of(kref
, struct ceph_msg
, kref
);
2407 dout("ceph_msg_put last one on %p\n", m
);
2408 WARN_ON(!list_empty(&m
->list_head
));
2410 /* drop middle, data, if any */
2412 ceph_buffer_put(m
->middle
);
2419 ceph_pagelist_release(m
->pagelist
);
2427 ceph_msgpool_put(m
->pool
, m
);
2431 EXPORT_SYMBOL(ceph_msg_last_put
);
2433 void ceph_msg_dump(struct ceph_msg
*msg
)
2435 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg
,
2436 msg
->front_max
, msg
->nr_pages
);
2437 print_hex_dump(KERN_DEBUG
, "header: ",
2438 DUMP_PREFIX_OFFSET
, 16, 1,
2439 &msg
->hdr
, sizeof(msg
->hdr
), true);
2440 print_hex_dump(KERN_DEBUG
, " front: ",
2441 DUMP_PREFIX_OFFSET
, 16, 1,
2442 msg
->front
.iov_base
, msg
->front
.iov_len
, true);
2444 print_hex_dump(KERN_DEBUG
, "middle: ",
2445 DUMP_PREFIX_OFFSET
, 16, 1,
2446 msg
->middle
->vec
.iov_base
,
2447 msg
->middle
->vec
.iov_len
, true);
2448 print_hex_dump(KERN_DEBUG
, "footer: ",
2449 DUMP_PREFIX_OFFSET
, 16, 1,
2450 &msg
->footer
, sizeof(msg
->footer
), true);
2452 EXPORT_SYMBOL(ceph_msg_dump
);