/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"
static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_mr->lkey;
	}
}
/*
 * The entire 'from' list, including the 'from' element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}
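
/*
 * Illustrative sketch, not built into the driver: given a chain anchored
 * at element 'a' that also holds 'b' and 'c', the helper above moves all
 * three onto the tail of 'to', in order a, b, c -- the anchor element
 * itself included, which plain list_splice_tail() would leave behind.
 * The demo function name is hypothetical.
 */
#if 0
static void list_splice_entire_tail_demo(void)
{
	LIST_HEAD(to);
	struct list_head a, b, c;

	/* build the circular chain a -> b -> c -> a, anchored at 'a' */
	INIT_LIST_HEAD(&a);
	list_add_tail(&b, &a);
	list_add_tail(&c, &a);

	/* 'to' now ends with a, b, c; nothing is left on the old chain */
	list_splice_entire_tail(&a, &to);
}
#endif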
static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}
static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
	struct rds_ib_cache_head *head;
	int cpu;

	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
	if (!ret) {
		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
		if (ret)
			free_percpu(ic->i_cache_incs.percpu);
	}

	return ret;
}
static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}
void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
	}

	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
			     struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}
/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

	return ibinc;
}
static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		sg_init_table(&frag->f_sg, 1);
		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (prefill) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
	sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}
/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  Refilling stops early if posting fails due to temporary resource
 * exhaustion; the unposted ring entry is then returned to the allocator.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
					pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, prefill);
		if (ret)
			break;

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) ib_sg_dma_address(
				ic->i_cm_id->device,
				&recv->r_frag->f_sg),
			 ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
}
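
/*
 * Illustrative sketch of how the two refill modes are meant to be used
 * (the real call sites live elsewhere in the transport; the demo function
 * name is hypothetical).  A prefill pass may sleep and use GFP_KERNEL /
 * GFP_HIGHUSER, so it can prime the whole ring at connection setup; the
 * completion path tops the ring up with prefill=0 and GFP_NOWAIT only.
 */
#if 0
static void rds_ib_recv_refill_usage_demo(struct rds_connection *conn)
{
	rds_ib_recv_refill(conn, 1);	/* at connection setup: may sleep */
	rds_ib_recv_refill(conn, 0);	/* from the tasklet: must not sleep */
}
#endif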
/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list.  When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd.  The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when one element is actually present.
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct list_head *old;
	struct list_head *chpfirst;

	local_irq_save(flags);

	chpfirst = __this_cpu_read(cache->percpu->first);
	if (!chpfirst)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chpfirst);

	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);

	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chpfirst);
		old = cmpxchg(&cache->xfer, NULL, chpfirst);
	} while (old);

	__this_cpu_write(cache->percpu->first, NULL);
	__this_cpu_write(cache->percpu->count, 0);
end:
	local_irq_restore(flags);
}
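
/*
 * A minimal sketch of the xchg/cmpxchg handoff above, pulled out of the
 * percpu machinery (illustrative only; the name and parameters are
 * hypothetical).  The producer steals whatever is on 'xfer', merges it
 * into its private batch, then publishes the merged batch only if 'xfer'
 * is still empty; if another CPU repopulated it in the meantime, the
 * cmpxchg fails and the loop merges again.  This is what lets the put
 * path run without a lock against a concurrent
 * rds_ib_cache_xfer_to_ready().
 */
#if 0
static void xfer_push_batch_demo(struct list_head **xfer,
				 struct list_head *batch)
{
	struct list_head *old;

	do {
		old = xchg(xfer, NULL);		  /* steal pending entries */
		if (old)
			list_splice_entire_tail(old, batch);
		old = cmpxchg(xfer, NULL, batch); /* publish if still empty */
	} while (old);				  /* raced: merge and retry */
}
#endif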
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
					    frag->f_sg.offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}
/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}
/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif
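
/*
 * Why the spinlock fallback above matters (illustrative, hypothetical
 * names): on a 32-bit platform without atomic64, a plain 64-bit load can
 * compile to two 32-bit loads, so a reader racing a writer may observe
 * the high half of a new sequence number paired with the low half of an
 * old one -- a value that was never written.  The sketch below is the
 * unsafe pattern the #ifndef branch exists to avoid.
 */
#if 0
static u64 seq_store;	/* updated concurrently by another context */

static u64 read_seq_unsafe(void)
{
	/* BROKEN on 32-bit: the two halves of this load can tear */
	return seq_store;
}
#endif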
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}
/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue).  If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -	i_ack_flags, which keeps track of whether the ACK WR
 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -	i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms.  Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly.  When we
 * reconnect, we may be seeing duplicate packets.  The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them.  It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */
/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}
/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}
/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}
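
/*
 * Illustrative use from the transmit path (a hypothetical helper; the
 * real caller lives in the send code, not in this file): the piggybacked
 * sequence number is folded into the outgoing header the same way
 * rds_ib_send_ack() fills a dedicated ACK frame.
 */
#if 0
static void rds_ib_xmit_piggyback_demo(struct rds_ib_connection *ic,
				       struct rds_header *hdr)
{
	/* consume any pending ACK and carry it on this data packet */
	hdr->h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
}
#endif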
/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg));

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}
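
/*
 * Worked example for the uncongested computation above, with illustrative
 * values (a set bit in the stored map marks a congested port).  If the old
 * stored word is 0b1100 and the newly received word is 0b1010, then
 * ~new & old = 0b0101 & 0b1100 = 0b0100: the port at bit 2 went from
 * congested to uncongested and is reported via rds_cong_map_updated().
 */
#if 0
static uint64_t cong_uncongested_demo(uint64_t old_word, uint64_t new_word)
{
	/* bits set in the old map but cleared in the new one */
	return ~new_word & old_word;
}
#endif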
/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}
/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}
static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 rds_ib_wc_status_str(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (wc.status == IB_WC_SUCCESS) {
			rds_ib_process_recv(conn, recv, wc.byte_len, state);
		} else {
			/* We expect errors as the qp is drained during shutdown */
			if (rds_conn_up(conn) || rds_conn_connecting(conn))
				rds_ib_conn_error(conn, "recv completion on %pI4 had "
						  "status %u (%s), disconnecting and "
						  "reconnecting\n", &conn->c_faddr,
						  wc.status,
						  rds_ib_wc_status_str(wc.status));
		}

		/*
		 * It's very important that we only free this ring entry if we've truly
		 * freed the resources allocated to the entry.  The refilling path can
		 * leak if we don't.
		 */
		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}
void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0);
}
int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return ret;
}
int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab)
		kmem_cache_destroy(rds_ib_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}
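
/*
 * Back-of-the-envelope for the default cap above, with illustrative
 * numbers: on a machine with 8 GiB of RAM and 4 KiB pages, totalram is
 * roughly 2M pages, so the cap is 2M / 3 * 4096 / RDS_FRAG_SIZE, i.e.
 * about 700K fragments when RDS_FRAG_SIZE is 4096 -- roughly 30% of RAM
 * worth of receive fragments.  The helper name is hypothetical.
 */
#if 0
static unsigned long rds_ib_default_recv_cap_demo(unsigned long totalram_pages)
{
	/* mirrors the expression in rds_ib_recv_init() */
	return totalram_pages / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
}
#endif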
void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}