/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "rdma.h"
#include "iw.h"
static void rds_iw_send_rdma_complete(struct rds_message *rm,
				      int wc_status)
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	rds_rdma_send_complete(rm, notify_status);
}
static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
				   struct rds_rdma_op *op)
{
	if (op->r_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->r_sg, op->r_nents,
				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->r_mapped = 0;
	}
}
static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
				 struct rds_iw_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->m_sg, rm->m_nents,
			DMA_TO_DEVICE);

	if (rm->m_rdma_op != NULL) {
		rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1.	Notify when we received the ACK on the RDS message
		 *	that was queued with the RDMA. This provides reliable
		 *	notification of RDMA status at the expense of a one-way
		 *	packet delay.
		 *  2.	Notify when the IB stack gives us the completion event for
		 *	the RDMA operation.
		 *  3.	Notify when the IB stack gives us the completion event for
		 *	the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_iw_send_rdma_complete(rm, wc_status);

		if (rm->m_rdma_op->r_write)
			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}
void rds_iw_send_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;
		send->s_mapping = NULL;

		send->s_wr.next = NULL;
		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->lkey = 0;

		sge = rds_iw_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;

		send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size);
		if (IS_ERR(send->s_mr)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n");
			break;
		}

		send->s_page_list = ib_alloc_fast_reg_page_list(
			ic->i_cm_id->device, fastreg_message_size);
		if (IS_ERR(send->s_page_list)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n");
			break;
		}
	}
}
void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		BUG_ON(!send->s_mr);
		ib_dereg_mr(send->s_mr);
		BUG_ON(!send->s_page_list);
		ib_free_fast_reg_page_list(send->s_page_list);
		if (send->s_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_iw_send_unmap_rdma(ic, send->s_op);
	}
}
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
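
/*
 * A minimal sketch of that lock-free split, for orientation only (the
 * real ring lives in iw_ring.c and its field names may differ):
 *
 *	struct sketch_ring {
 *		u32 w_nr;		- number of ring entries
 *		u32 alloc_ptr;		- next entry the sender fills
 *		atomic_t free_cnt;	- entries currently available
 *	};
 *
 * The send path alone advances alloc_ptr (and may back it up on
 * unalloc); the completion handler alone walks forward from the oldest
 * outstanding entry and returns entries with an atomic add to free_cnt.
 * Since neither side ever writes the other's cursor, no spinlock is
 * needed between them.
 */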
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_iw_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_iw_stats_inc(s_iw_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_iw_stats_inc(s_iw_tx_cq_event);

		if (wc.status != IB_WC_SUCCESS) {
			printk(KERN_ERR "WC Error:  status = %d opcode = %d\n", wc.status, wc.opcode);
			break;
		}

		if (wc.opcode == IB_WC_LOCAL_INV && wc.wr_id == RDS_IW_LOCAL_INV_WR_ID) {
			ic->i_fastreg_posted = 0;
			continue;
		}

		if (wc.opcode == IB_WC_FAST_REG_MR && wc.wr_id == RDS_IW_FAST_REG_WR_ID) {
			ic->i_fastreg_posted = 1;
			continue;
		}

		if (wc.wr_id == RDS_IW_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_iw_stats_inc(s_iw_tx_stalled);
			rds_iw_ack_send_complete(ic);
			continue;
		}

		oldest = rds_iw_ring_oldest(&ic->i_send_ring);

		completed = rds_iw_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_iw_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_FAST_REG_MR:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_READ_WITH_INV:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
						"RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
						__func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_iw_stats_inc(s_iw_tx_stalled);

			/* If a RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * will never learn that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm)
					rds_iw_send_rdma_complete(rm, wc.status);
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_iw_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)
		 || test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_iw_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in a RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_iw_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
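
/*
 * For orientation, a sketch of how the two counters can share one
 * atomic_t - the real macros live in iw.h; the exact layout shown
 * here (send credits in the low 16 bits, posted credits in the high
 * 16 bits) is an assumption based on how they are used below:
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * With such a layout, the receive path can publish fresh credits with
 * a single atomic_add(IB_SET_POST_CREDITS(n), &ic->i_credits), while
 * the send path below snapshots both halves, computes new values, and
 * commits them together with one atomic_cmpxchg.
 */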
int rds_iw_send_grab_credits(struct rds_iw_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_iw_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}

	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, RDS_MAX_ADV_CREDIT);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
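
/*
 * Usage note: rds_iw_xmit() below calls this twice - once as
 * rds_iw_send_grab_credits(ic, work_alloc, &posted, 0) to trade ring
 * slots for send credits, and once as
 * rds_iw_send_grab_credits(ic, 0, &posted, 1) to pick up any posted
 * credits for advertisement even though no send credit is consumed.
 */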
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_iw_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_iw_stats_inc(s_iw_rx_credit_updates);
}
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
static void
rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
		struct rds_iw_send_work *send, unsigned int pos,
		unsigned long buffer, unsigned int length,
		int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = rds_iw_local_dma_lkey(ic);

		sge = rds_iw_header_sge(ic, send->s_sge);
	} else {
		/* We're sending a packet with no payload. There is only
		 * one SGE */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);
}
/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
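
/*
 * Worked example of the fragmentation math (assuming the usual 4 KB
 * RDS_FRAG_SIZE; the actual value comes from rds.h): a 9000-byte
 * message needs ceil(9000, 4096) = 3 work requests. Each WR carries
 * one data SGE of up to RDS_FRAG_SIZE bytes plus the header SGE set
 * up by rds_iw_xmit_populate_wr() above, and only the WR holding the
 * final fragment hands ownership of the message to the completion
 * handler.
 */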
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_iw_send_work *send = NULL;
	struct rds_iw_send_work *first;
	struct rds_iw_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int sent;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Fastreg support */
	if (rds_rdma_cookie_key(rm->m_rdma_cookie)
	 && !ic->i_fastreg_posted) {
		ret = -EAGAIN;
		goto out;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_iw_stats_inc(s_iw_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	credit_alloc = work_alloc;
	if (ic->i_flowctl) {
		credit_alloc = rds_iw_send_grab_credits(ic, work_alloc, &posted, 0);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
			rds_iw_stats_inc(s_iw_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (ic->i_rm == NULL) {
		/*
		printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
				rm->m_inc.i_hdr.h_flags,
				be32_to_cpu(rm->m_inc.i_hdr.h_len));
		*/
		if (rm->m_nents) {
			rm->m_count = ib_dma_map_sg(dev,
					rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
			if (rm->m_count == 0) {
				rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
				rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
		ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->m_rdma_op) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_iw_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_iw_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_iw_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		rds_iw_send_grab_credits(ic, 0, &posted, 1);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	} else if (ic->i_rm != rm)
		BUG();

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->m_sg[sg];
	sent = 0;
	i = 0;

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
		send_flags = IB_SEND_FENCE;

	/*
	 * We could be copying the header into the unused tail of the page.
	 * That would need to be changed in the future when those pages might
	 * be mapped userspace pages or page cache pages. So instead we always
	 * use a second sge and our long-lived ring of mapped headers. We send
	 * the header after the data so that the data payload can be aligned on
	 * the receiver.
	 */

	/* handle a 0-len message */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
		rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
		goto add_header;
	}

	/* if there's data reference it with a chain of work reqs */
	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
		unsigned int len;

		send = &ic->i_sends[pos];

		len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
		rds_iw_xmit_populate_wr(ic, send, pos,
				ib_sg_dma_address(dev, scat) + off, len,
				send_flags);

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		ic->i_unsignaled_bytes -= len;
		if (ic->i_unsignaled_bytes <= 0) {
			ic->i_unsignaled_bytes = rds_iw_sysctl_max_unsig_bytes;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		sent += len;
		off += len;
		if (off == ib_sg_dma_len(dev, scat)) {
			scat++;
			off = 0;
		}

add_header:
		/* Tack on the header after the data. The header SGE should already
		 * have been set up to point to the right header buffer. */
		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		if (0) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
				be16_to_cpu(hdr->h_dport),
				hdr->h_flags,
				be32_to_cpu(hdr->h_len));
		}
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_iw_stats_inc(s_iw_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
	}

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->m_sg[rm->m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	if (i < work_alloc) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_iw_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IW: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}
		goto out;
	}

	ret = sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev,
		struct rds_iw_connection *ic, struct rds_iw_send_work *send,
		int nent, int len, u64 sg_addr)
{
	BUG_ON(nent > send->s_page_list->max_page_list_len);
	/*
	 * Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR.
	 */
	send->s_wr.opcode = IB_WR_FAST_REG_MR;
	send->s_wr.wr.fast_reg.length = len;
	send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey;
	send->s_wr.wr.fast_reg.page_list = send->s_page_list;
	send->s_wr.wr.fast_reg.page_list_len = nent;
	send->s_wr.wr.fast_reg.page_shift = rds_iwdev->page_shift;
	send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
	send->s_wr.wr.fast_reg.iova_start = sg_addr;

	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
}
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct rds_iw_send_work *send = NULL;
	struct rds_iw_send_work *first;
	struct rds_iw_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_iw_device *rds_iwdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	u32 fr_pos;
	int sent;
	int ret;
	int num_sge;

	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents, (op->r_write) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	if (!op->r_write) {
		/* Alloc space on the send queue for the fastreg */
		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
		if (work_alloc != 1) {
			rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
			rds_iw_stats_inc(s_iw_tx_ring_full);
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_iwdev->max_sge);

	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_iw_stats_inc(s_iw_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	if (!op->r_write) {
		first = prev = &ic->i_sends[fr_pos];
	} else {
		first = send;
		prev = NULL;
	}
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;

		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		/* To avoid the need to have the plumbing to invalidate the fastreg_mr used
		 * for local access after RDS is finished with it, using
		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
		 */
		if (op->r_write)
			send->s_wr.opcode = IB_WR_RDMA_WRITE;
		else
			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;

		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_iwdev->max_sge) {
			send->s_wr.num_sge = rds_iwdev->max_sge;
			num_sge -= rds_iwdev->max_sge;
		} else
			send->s_wr.num_sge = num_sge;

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);

			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
				send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat);
			else {
				send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat);
				send->s_sge[j].length = len;
				send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic);
			}

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
			remote_addr += len;

			scat++;
		}

		if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
			send->s_wr.num_sge = 1;
			send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
			send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
			send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		first->s_wr.send_flags = IB_SEND_SIGNALED;

	if (i < work_alloc) {
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	/* On iWARP, local memory access by a remote system (ie, RDMA Read) is not
	 * recommended.  Putting the lkey on the wire is a security hole, as it can
	 * allow for memory access to all of memory on the remote system.  Some
	 * adapters do not allow using the lkey for this at all.  To bypass this use a
	 * fastreg_mr (or possibly a dma_mr)
	 */
	if (!op->r_write) {
		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
				op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
		work_alloc++;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IW: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	ret = sent;
out:
	return ret;
}
void rds_iw_xmit_complete(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_iw_attempt_ack(ic);
}