/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU.  Otherwise the softlockup
 * watchdog will fire.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit.  Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
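/*
 * With 0444 permissions the parameter is read-only at runtime: it can be
 * set at module load time (send_batch_count=N) and inspected under
 * /sys/module/rds/parameters/, assuming the usual "rds" module name.
 */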
/*
 * Reset the send state.  Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}
	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_rdma_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
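/*
 * A note on ordering: the list_splice_init() in rds_send_reset() puts the
 * retransmit list at the head of c_send_queue, so retransmitted messages
 * keep their relative order and go out ahead of anything queued later.
 */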
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The sem holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the sem.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {
		/*
		 * See if we need to send a congestion map update if we're
		 * between sending messages.  The send_sem protects our sole
		 * use of c_map_offset and _bytes.
		 * Note this is used only by transports that define a special
		 * xmit_cong_map function.  For all others, we allocate
		 * a cong_map message and treat it just like any other send.
		 */
		if (conn->c_map_bytes) {
			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
							   conn->c_map_offset);
			if (ret <= 0)
				break;

			conn->c_map_offset += ret;
			conn->c_map_bytes -= ret;
			if (conn->c_map_bytes)
				continue;	/* sent a partial map */
		}

		/* If we're done sending the current message, clear the
		 * offset and S/G temporaries.
		 */
		rm = conn->c_xmit_rm;
		if (rm != NULL &&
		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
		    conn->c_xmit_sg == rm->m_nents) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;

			/* Release the reference to the previous message. */
			rds_message_put(rm);
			rm = NULL;
		}

		/* If we're asked to send a cong map update, do so.
		 */
		if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map != NULL) {
				conn->c_map_offset = 0;
				conn->c_map_bytes = sizeof(struct rds_header) +
						RDS_CONG_MAP_BYTES;
				continue;
			}

			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}

			conn->c_xmit_rm = rm;
		}

		/*
		 * Grab the next message from the send queue, if there is one.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (rm == NULL) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (rm == NULL) {
				was_empty = 1;
				break;
			}

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state.  We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->m_rdma_op &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/*
		 * Try and send an rdma message.  Let's see if we can
		 * keep this simple and require that the transport either
		 * send the whole rdma or none of it.
		 */
		if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
			ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
		    conn->c_xmit_sg < rm->m_nents) {
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->m_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->m_nents);
				}
			}
		}
	}

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock.  If we check
	 * for queued messages after dropping the sem then either we'll
	 * see the queued message or the queuer will get the sem.  If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty.  It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);

	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
		/* We exhausted the send quota, but there's work left to
		 * do.  Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}

	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
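/*
 * Return convention for rds_send_xmit(): 0 means the send queue was
 * drained, -ENOMEM means another task already held c_send_lock, and
 * -EAGAIN means there is still queued work (quota exhausted, a partial
 * congestion map, or a raced queuer) and the caller should arrange for
 * rds_send_xmit() to run again, typically via the connection's send worker.
 */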
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
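/*
 * rs_snd_bytes is the socket's send-buffer accounting: rds_send_queue_rm()
 * charges it when a message is queued and this helper releases it when the
 * message leaves the socket's send queue.
 */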
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
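/*
 * A transport may pass its own is_acked callback when it acknowledges by
 * something other than the RDS header sequence number.  Purely as an
 * illustration (assuming a transport that stamps rm->m_ack_seq and sets
 * RDS_MSG_HAS_ACK_SEQ once that value is valid), such a callback could
 * look roughly like:
 *
 *	static int example_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 */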
/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number greater than or equal to the given sequence
 * number.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = rm->m_rdma_op;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro && ro->r_notify && ro->r_notifier) {
		notifier = ro->r_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->r_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rds_rdma_op *ro;

	ro = rm->m_rdma_op;
	if (ro && ro->r_notify && ro->r_notifier) {
		ro->r_notifier->n_status = status;
		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
		ro->r_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}
/*
 * This is called from the IB send completion when we detect
 * a RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rds_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (rm->m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rds_rdma_op *ro = rm->m_rdma_op;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro && ro->r_notifier && (status || ro->r_notify)) {
				notifier = ro->r_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->m_rdma_op->r_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->m_rdma_cookie and rm->m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
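/*
 * Userspace reaches this parser by attaching SOL_RDS control messages to
 * sendmsg().  A rough sketch (illustrative only; it assumes the uapi
 * definitions in <linux/rds.h>, and mr_args stands for a filled-in
 * struct rds_get_mr_args):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct rds_get_mr_args))];
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type = RDS_CMSG_RDMA_MAP;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct rds_get_mr_args));
 *	memcpy(CMSG_DATA(cmsg), &mr_args, sizeof(mr_args));
 *	sendmsg(fd, &msg, 0);
 */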
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror how Linux UDP mirrors BSD's error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
	if (IS_ERR(rm)) {
		ret = PTR_ERR(rm);
		rm = NULL;
		goto out;
	}

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
	    conn->c_trans->xmit_rdma == NULL) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       rm->m_rdma_op, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the connection is down, trigger a connect.  We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
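/*
 * For reference, a minimal userspace send into this path looks roughly
 * like the sketch below (illustrative only; it assumes the uapi
 * definitions in <linux/rds.h>, and the socket must already be bound to
 * a local address or the send fails with -ENOTCONN as above):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
 *		.sin_port = htons(4000),
 *	};
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr hdr = {
 *		.msg_name = &dst,
 *		.msg_namelen = sizeof(dst),
 *		.msg_iov = &iov,
 *		.msg_iovlen = 1,
 *	};
 *	sendmsg(fd, &hdr, 0);
 */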
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (rm == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;

	/* If the connection is down, trigger a connect. We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}