#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       ||                           \    \
 *       ||      -----------           \    \
 *       ||      | CLOSING |  socket event;  \    \
 *       ||      -----------  await close     \    \
 *       ||           ^                        \    |
 *       ||           + con_sock_state_closing()\   |
 *       ||          /  ---------------          \  |
 *       ||         /                  \          v v
 *       ||        /    -----------------| CONNECTING |  socket created, TCP
 *       ||       |    /                  --------------  connect initiated
 *       ||       |    |   con_sock_state_connected()
 *       ||       |    v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 */
/*
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */
/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */
/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4	/* need to retry queuing delayed work */
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);
static struct page *zero_page;		/* used in certain error cases */
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
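
/*
 * Example (editor's sketch, not part of the original source): because
 * ceph_pr_addr() hands out one of ADDR_STR_COUNT rotating static buffers,
 * it can be used directly in format arguments without locking:
 *
 *	dout("connecting to %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 *
 * Only the last ADDR_STR_COUNT results remain valid; a later caller may
 * overwrite an older string, which is why this only "approximates"
 * reentrancy.
 */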
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;
void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	page_cache_release(zero_page);
	zero_page = NULL;
}
int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);
void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);
void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
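
/*
 * Lifecycle sketch (editor's illustration; error handling elided):
 *
 *	ret = ceph_msgr_init();		// once, before any connection is used
 *	...
 *	ceph_msgr_flush();		// drain queued con_work items
 *	ceph_msgr_exit();		// once, after all connections are closed
 */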
/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}
/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space(). */
	if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
		if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}
/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}
	con->sock = sock;
	return 0;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}
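
/*
 * Note (editor's commentary): as reconstructed above, all three socket
 * helpers map -EAGAIN to a return of 0, so callers can treat "made no
 * progress" and "socket full/empty" uniformly:
 *
 *	ret = ceph_tcp_sendmsg(con->sock, iov, kvlen, len, 1);
 *	if (ret <= 0)
 *		return ret;	// 0: retry when writable; <0: hard error
 */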
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);

	con_sock_state_closed(con);
	return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}
static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	clear_bit(CON_FLAG_LOSSYTX, &con->flags);  /* so we retry next connect */
	clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	clear_bit(CON_FLAG_BACKOFF, &con->flags);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	BUG_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
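
/*
 * Usage sketch (editor's illustration; the osd-side names below are
 * hypothetical caller-side identifiers, not defined in this file):
 *
 *	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &client->msgr);
 *	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, &addr);
 *	ceph_con_send(&osd->o_con, msg);
 *
 * A connection must be in CON_STATE_CLOSED (as left by ceph_con_init() or
 * ceph_con_close()) before it may be (re)opened.
 */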
/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
	const struct ceph_connection_operations *ops,
	struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
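
/*
 * Example (editor's commentary): get_global_seq(msgr, 0) simply hands out
 * the next sequence number; process_connect() passes the peer's value after
 * a RETRY_GLOBAL so the next connect attempt uses a gseq strictly greater
 * than anything the peer has seen:
 *
 *	get_global_seq(con->msgr, le32_to_cpu(con->in_reply.global_seq));
 */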
static void con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}
static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}
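
/*
 * Staging pattern (editor's sketch of how the prepare_write_* helpers below
 * use this API): stage the control bytes, then mark the connection writable:
 *
 *	con_out_kvec_reset(con);
 *	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
 *	con_out_kvec_add(con, sizeof (con->out_temp_ack), &con->out_temp_ack);
 *	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
 *
 * write_partial_kvec() later drains out_kvec[] as socket space permits.
 */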
#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
	if (!bio) {
		*iter = NULL;
		*seg = 0;
		return;
	}
	*iter = bio;
	*seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
	if (*bio_iter == NULL)
		return;

	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

	(*seg)++;
	if (*seg == (*bio_iter)->bi_vcnt)
		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif
static void prepare_write_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;

	BUG_ON(!msg);
	BUG_ON(!msg->hdr.data_len);

	/* initialize page iterator */
	con->out_msg_pos.page = 0;
	if (msg->pages)
		con->out_msg_pos.page_pos = msg->page_alignment;
	else
		con->out_msg_pos.page_pos = 0;
#ifdef CONFIG_BLOCK
	if (msg->bio)
		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif
	con->out_msg_pos.data_pos = 0;
	con->out_msg_pos.did_page_crc = false;
	con->out_more = 1;  /* data + footer will follow */
}
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
				 &con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
				 m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
			     m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->hdr.data_len)
		prepare_write_message_data(con);
	else
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);

	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */
	mutex_unlock(&con->mutex);
	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (con->state != CON_STATE_NEGOTIATING)
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
			 &con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
				 auth->authorizer_buf);

	con->out_more = 0;
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);

	return 0;
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
			     size_t len, size_t sent, bool in_trail)
{
	struct ceph_msg *msg = con->out_msg;

	BUG_ON(!msg);
	BUG_ON(!sent);

	con->out_msg_pos.data_pos += sent;
	con->out_msg_pos.page_pos += sent;
	if (sent < len)
		return;

	BUG_ON(sent != len);
	con->out_msg_pos.page_pos = 0;
	con->out_msg_pos.page++;
	con->out_msg_pos.did_page_crc = false;
	if (in_trail)
		list_move_tail(&page->lru,
			       &msg->trail->head);
	else if (msg->pagelist)
		list_move_tail(&page->lru,
			       &msg->pagelist->head);
#ifdef CONFIG_BLOCK
	else if (msg->bio)
		iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	bool do_datacrc = !con->msgr->nocrc;
	int ret;
	int total_max_write;
	bool in_trail = false;
	const size_t trail_len = (msg->trail ? msg->trail->length : 0);
	const size_t trail_off = data_len - trail_len;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, msg, con->out_msg_pos.page, msg->nr_pages,
	     con->out_msg_pos.page_pos);

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	while (data_len > con->out_msg_pos.data_pos) {
		struct page *page = NULL;
		int max_write = PAGE_SIZE;
		int bio_offset = 0;

		in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
		if (!in_trail)
			total_max_write = trail_off - con->out_msg_pos.data_pos;

		if (in_trail) {
			total_max_write = data_len - con->out_msg_pos.data_pos;

			page = list_first_entry(&msg->trail->head,
						struct page, lru);
		} else if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
#ifdef CONFIG_BLOCK
		} else if (msg->bio) {
			struct bio_vec *bv;

			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
			page = bv->bv_page;
			bio_offset = bv->bv_offset;
			max_write = bv->bv_len;
#endif
		} else {
			page = zero_page;
		}
		len = min_t(int, max_write - con->out_msg_pos.page_pos,
			    total_max_write);

		if (do_datacrc && !con->out_msg_pos.did_page_crc) {
			void *base;
			u32 crc = le32_to_cpu(msg->footer.data_crc);
			char *kaddr;

			kaddr = kmap(page);
			BUG_ON(kaddr == NULL);
			base = kaddr + con->out_msg_pos.page_pos + bio_offset;
			crc = crc32c(crc, base, len);
			kunmap(page);
			msg->footer.data_crc = cpu_to_le32(crc);
			con->out_msg_pos.did_page_crc = true;
		}
		ret = ceph_tcp_sendpage(con->sock, page,
					con->out_msg_pos.page_pos + bio_offset,
					len, 1);
		if (ret <= 0)
			goto out;

		out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!do_datacrc)
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);
	ret = 1;
out:
	return ret;
}
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
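
/*
 * Cursor convention (editor's commentary): @end is the value
 * con->in_base_pos will reach once @object is complete, and @size is the
 * object's size, so a multi-field reader accumulates:
 *
 *	size = sizeof (con->in_reply);
 *	end = size;
 *	ret = read_partial(con, end, size, &con->in_reply);
 *
 *	size = le32_to_cpu(con->in_reply.authorizer_len);
 *	end += size;
 *	ret = read_partial(con, end, size, con->auth_reply_buf);
 */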
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
out:
	return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}
static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}
static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}
static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}
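
/*
 * Example (editor's sketch): parse the address part of "10.0.0.1:6789",
 * stopping at the port delimiter:
 *
 *	const char *p;
 *	struct sockaddr_storage ss;
 *
 *	if (ceph_pton("10.0.0.1:6789", 13, &ss, ':', &p) == 0) {
 *		// ss.ss_family == AF_INET; *p == ':' (port not consumed)
 *	}
 */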
/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
			ret, ret ? "failed" : ceph_pr_addr(ss));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	return -EINVAL;
}
#endif
/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	int ret;

	ret = ceph_pton(name, namelen, ss, delim, ipend);
	if (ret)
		ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

	return ret;
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i, ret = -EINVAL;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
		if (ret)
			goto bad;
		ret = -EINVAL;

		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", ceph_pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
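
/*
 * Example (editor's commentary): a monitor list such as
 * "1.2.3.4:6789,[::1]:6789,10.0.0.2" yields three entries, with
 * CEPH_MON_PORT applied to the final address that omits a port.
 */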
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   ceph_pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	BUG_ON(con->state != CON_STATE_NEGOTIATING);
	con->state = CON_STATE_CLOSED;
}
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con->auth_retry = 1;
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_reply.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_reply.global_seq));
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}

		BUG_ON(con->state != CON_STATE_NEGOTIATING);
		con->state = CON_STATE_OPEN;

		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(CON_FLAG_LOSSYTX, &con->flags);

		con->delay = 0;      /* reset backoff memory */

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect got WAIT as client\n");
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}
/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}

static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
static int read_partial_message_pages(struct ceph_connection *con,
				      struct page **pages,
				      unsigned int data_len, bool do_datacrc)
{
	void *p;
	int ret;
	int left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
	/* (page) data */
	BUG_ON(pages == NULL);
	p = kmap(pages[con->in_msg_pos.page]);
	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(pages[con->in_msg_pos.page]);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
		con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.page++;
	}

	return ret;
}
#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
				    struct bio **bio_iter, int *bio_seg,
				    unsigned int data_len, bool do_datacrc)
{
	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
	void *p;
	int ret, left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(bv->bv_len - con->in_msg_pos.page_pos));

	p = kmap(bv->bv_page) + bv->bv_offset;

	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(bv->bv_page);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == bv->bv_len) {
		con->in_msg_pos.page_pos = 0;
		iter_bio_next(bio_iter, bio_seg);
	}

	return ret;
}
#endif
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned int front_len, middle_len, data_len;
	bool do_datacrc = !con->msgr->nocrc;
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr "
		       " crc %u != expected %u\n",
		       crc, con->in_hdr.crc);
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		int skip = 0;

		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		ret = ceph_con_in_msg_alloc(con, &skip);
		if (ret < 0)
			return ret;
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}

		BUG_ON(!con->in_msg);
		BUG_ON(con->in_msg->con != con);
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		if (m->pages)
			con->in_msg_pos.page_pos = m->page_alignment;
		else
			con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.data_pos = 0;

#ifdef CONFIG_BLOCK
		if (m->bio)
			init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		if (m->pages) {
			ret = read_partial_message_pages(con, m->pages,
							 data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#ifdef CONFIG_BLOCK
		} else if (m->bio) {
			BUG_ON(!m->bio_iter);
			ret = read_partial_message_bio(con,
						       &m->bio_iter, &m->bio_seg,
						       data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#endif
		} else {
			BUG_ON(1);
		}
	}

	/* footer */
	size = sizeof (m->footer);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	BUG_ON(con->in_msg->con != con);
	con->in_msg->con = NULL;
	msg = con->in_msg;
	con->in_msg = NULL;
	con->ops->put(con);

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->state == CON_STATE_PREOPEN) {
		BUG_ON(con->sock);
		con->state = CON_STATE_CONNECTING;

		con_out_kvec_reset(con);
		prepare_write_banner(con);
		prepare_read_banner(con);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (con->state == CON_STATE_OPEN) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
				       &con->flags)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

	dout("try_read start on %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	BUG_ON(!con->sock);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	if (con->state == CON_STATE_CONNECTING) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		BUG_ON(con->state != CON_STATE_CONNECTING);
		con->state = CON_STATE_NEGOTIATING;

		/*
		 * Received banner is good, exchange connection info.
		 * Do not reset out_kvec, as sending our banner raced
		 * with receiving peer banner after connect completed.
		 */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (con->state == CON_STATE_NEGOTIATING) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	BUG_ON(con->state != CON_STATE_OPEN);

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int) sizeof (buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			con_close_socket(con);
			con->state = CON_STATE_CLOSED;
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		if (con->state == CON_STATE_OPEN)
			prepare_read_tag(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int ret;

	mutex_lock(&con->mutex);
restart:
	if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
		switch (con->state) {
		case CON_STATE_CONNECTING:
			con->error_msg = "connection failed";
			break;
		case CON_STATE_NEGOTIATING:
			con->error_msg = "negotiation failed";
			break;
		case CON_STATE_OPEN:
			con->error_msg = "socket closed";
			break;
		default:
			dout("unrecognized con state %d\n", (int)con->state);
			con->error_msg = "unrecognized con state";
			BUG();
		}
		goto fault;
	}

	if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
		dout("con_work %p backing off\n", con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("con_work %p backoff %lu\n", con, con->delay);
			mutex_unlock(&con->mutex);
			return;
		} else {
			dout("con_work %p FAILED to back off %lu\n", con,
			     con->delay);
			set_bit(CON_FLAG_BACKOFF, &con->flags);
		}
		goto done;
	}

	if (con->state == CON_STATE_STANDBY) {
		dout("con_work %p STANDBY\n", con);
		goto done;
	}
	if (con->state == CON_STATE_CLOSED) {
		dout("con_work %p CLOSED\n", con);
		BUG_ON(con->sock);
		goto done;
	}
	if (con->state == CON_STATE_PREOPEN) {
		dout("con_work OPENING\n");
		BUG_ON(con->sock);
	}

	ret = try_read(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on read";
		goto fault;
	}

	ret = try_write(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on write";
		goto fault;
	}

done:
	mutex_unlock(&con->mutex);
done_unlocked:
	con->ops->put(con);
	return;

fault:
	ceph_fault(con);     /* error/fault path */
	goto done_unlocked;
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff
 */
static void ceph_fault(struct ceph_connection *con)
	__releases(con->mutex)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	BUG_ON(con->state != CON_STATE_CONNECTING &&
	       con->state != CON_STATE_NEGOTIATING &&
	       con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		goto out_unlock;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * delay then.
			 */
			set_bit(CON_FLAG_BACKOFF, &con->flags);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
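
/*
 * Backoff behavior (editor's summary): con->delay starts at
 * BASE_DELAY_INTERVAL and doubles on each successive fault up to
 * MAX_DELAY_INTERVAL; a successful (re)open resets it to 0, so retries
 * follow delay, 2*delay, 4*delay, ... capped at the maximum.
 */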
/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
			 struct ceph_entity_addr *myaddr,
			 u32 supported_features,
			 u32 required_features,
			 bool nocrc)
{
	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);
	msgr->nocrc = nocrc;

	atomic_set(&msgr->stopping, 0);

	dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);
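
/*
 * A minimal usage sketch: a client embeds a ceph_messenger and brings
 * it up before opening any connections.  The NULL myaddr (no fixed
 * local address) and the default feature masks from ceph_features.h
 * are placeholder choices, not values mandated by this file.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_messenger_setup(struct ceph_messenger *msgr)
{
	ceph_messenger_init(msgr, NULL,
			    CEPH_FEATURES_SUPPORTED_DEFAULT,
			    CEPH_FEATURES_REQUIRED_DEFAULT,
			    false /* nocrc: keep CRC checking enabled */);
}
#endif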
static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (con->state == CON_STATE_STANDBY) {
		dout("clear_standby %p and ++connect_seq\n", con);
		con->state = CON_STATE_PREOPEN;
		con->connect_seq++;
		WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
		WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
	}
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	msg->hdr.src = con->msgr->inst.name;
	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
	msg->needs_out_seq = true;

	mutex_lock(&con->mutex);

	if (con->state == CON_STATE_CLOSED) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		mutex_unlock(&con->mutex);
		return;
	}

	BUG_ON(msg->con != NULL);
	msg->con = con->ops->get(con);
	BUG_ON(msg->con == NULL);

	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));

	clear_standby(con);
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
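
/*
 * A minimal send-path sketch, assuming an already-open connection:
 * allocate a message (ref count 1, see ceph_msg_new below) and hand
 * the reference over to the messenger.  The message type here is an
 * arbitrary placeholder choice.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_send(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
	if (!msg)
		return;		/* can_fail=true: allocation may fail */
	/* ceph_con_send() consumes our reference (or drops the msg
	 * itself if the connection is CLOSED) */
	ceph_con_send(con, msg);
}
#endif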
/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con)
		return;		/* Message not in our possession */

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		BUG_ON(msg->con == NULL);
		msg->con->ops->put(msg->con);
		msg->con = NULL;
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		dout("%s %p msg %p - was sending\n", __func__, con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	mutex_unlock(&con->mutex);
}
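
/*
 * Callers use revoke when a queued request must move to a different
 * connection or be aborted: the message may still sit on out_queue or
 * be partially written.  A hedged sketch of that pattern; the request
 * structure below is hypothetical:
 */
#if 0	/* illustrative sketch only, not compiled */
struct example_request {
	struct ceph_msg *request;	/* previously passed to ceph_con_send() */
};

static void example_cancel(struct example_request *req)
{
	/* safe even if the messenger already sent or dropped it */
	ceph_msg_revoke(req->request);
}
#endif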
/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con;

	BUG_ON(msg == NULL);
	if (!msg->con) {
		dout("%s msg %p null con\n", __func__, msg);
		return;		/* Message not in our possession */
	}

	con = msg->con;
	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}
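
/*
 * Setting in_base_pos negative makes the read path consume and discard
 * the remainder of the partially-read message.  A hedged sketch of how
 * such a negative position is typically drained (the real logic lives
 * in try_read(); the read helper here is a placeholder):
 */
#if 0	/* illustrative sketch only, not compiled */
	while (con->in_base_pos < 0) {
		/* read up to one skip buffer's worth and throw it away */
		int chunk = min_t(int, -con->in_base_pos, SKIP_BUF_SIZE);
		int ret = example_read_into_skip_buf(con, chunk);

		if (ret <= 0)
			break;			/* would block or error */
		con->in_base_pos += ret;	/* advance toward zero */
	}
#endif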
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
	    test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
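
/*
 * A typical caller fires this from periodic delayed work so an
 * otherwise idle TCP session is exercised.  A hedged sketch; the
 * client structure, work item, and interval are placeholders:
 */
#if 0	/* illustrative sketch only, not compiled */
struct example_client {
	struct ceph_connection con;
	struct delayed_work tick;
};

static void example_tick(struct work_struct *work)
{
	struct example_client *client =
		container_of(work, struct example_client, tick.work);

	ceph_con_keepalive(&client->con);
	schedule_delayed_work(&client->tick, 10 * HZ);
}
#endif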
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);

	m->con = NULL;
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
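
/*
 * The can_fail flag only changes how allocation failure is reported: a
 * loud WARN plus pr_err when the caller cannot cope, a quiet NULL when
 * it can.  A hedged sketch of the first style; the fallback message is
 * a hypothetical reserve, not something this file provides:
 */
#if 0	/* illustrative sketch only, not compiled */
static struct ceph_msg *example_alloc(int type, int front_len,
				      struct ceph_msg *fallback_msg)
{
	struct ceph_msg *msg;

	/* caller has a fallback, so a quiet NULL on failure is fine */
	msg = ceph_msg_new(type, front_len, GFP_NOFS, true);
	if (!msg)
		msg = fallback_msg;	/* hypothetical reserve message */
	return msg;
}
#endif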
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns 0 on success, or a negative error code.
 *
 * On success, if we set *skip = 1:
 *  - the next message should be skipped and ignored.
 *  - con->in_msg == NULL
 * or if we set *skip = 0:
 *  - con->in_msg is non-null.
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	int ret = 0;

	BUG_ON(con->in_msg != NULL);

	if (con->ops->alloc_msg) {
		struct ceph_msg *msg;

		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_OPEN) {
			if (msg)
				ceph_msg_put(msg);
			return -EAGAIN;
		}
		con->in_msg = msg;
		if (con->in_msg) {
			con->in_msg->con = con->ops->get(con);
			BUG_ON(con->in_msg->con == NULL);
		}
		if (*skip) {
			con->in_msg = NULL;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
	}
	if (!con->in_msg) {
		con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!con->in_msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return -ENOMEM;
		}
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
		con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}
/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);
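
/*
 * Callers never invoke ceph_msg_last_put() directly; it is the kref
 * release callback.  A hedged sketch of the matching put helper, which
 * in this era lives as a static inline in the messenger header:
 */
#if 0	/* illustrative sketch only, not compiled */
static inline void example_msg_put(struct ceph_msg *msg)
{
	/* runs ceph_msg_last_put() when the last reference drops */
	kref_put(&msg->kref, ceph_msg_last_put);
}
#endif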
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);