/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
                 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_CM_RX_SG])
{
        int i;

        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        for (i = 0; i < IPOIB_CM_RX_SG - 1; ++i)
                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

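/*
 * Repost receive buffer 'id' to the SRQ shared by all connected-mode receive
 * QPs.  The work request ID encodes the ring index with IPOIB_CM_OP_SRQ set,
 * so the completion handler can tell CM receives apart from datagram-mode
 * work requests.
 */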
static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int i, ret;

        priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[id].mapping);
                dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                priv->cm.srq_ring[id].skb = NULL;
        }

        return ret;
}

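/*
 * Allocate a connected-mode receive buffer: a linear header area of
 * IPOIB_CM_HEAD_SIZE bytes plus IPOIB_CM_RX_SG - 1 full pages attached as
 * skb fragments, all mapped for DMA.  On failure, everything mapped so far
 * is unwound and the skb is freed.
 */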
static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
                                 u64 mapping[IPOIB_CM_RX_SG])
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int i;

        skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
        if (unlikely(!skb))
                return -ENOMEM;

        /*
         * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 12);

        mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        for (i = 0; i < IPOIB_CM_RX_SG - 1; i++) {
                struct page *page = alloc_page(GFP_ATOMIC);

                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

                mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
                                                 0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
                        goto partial_error;
        }

        priv->cm.srq_ring[id].skb = skb;
        return 0;

partial_error:
        ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

        while (--i >= 0)
                ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);

        dev_kfree_skb_any(skb);
        return -ENOMEM;
}

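/*
 * The passive-side QP receives on the shared SRQ; its send queue is never
 * used, which is why a dummy send CQ and minimal send resources are
 * acceptable here.
 */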
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
                                           struct ipoib_cm_rx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .send_cq = priv->cq, /* does not matter, we never send anything */
                .recv_cq = priv->cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
                .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };

        return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
                return ret;
        }
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }
        return 0;
}

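/*
 * The REP private data advertises our datagram QPN and the connected-mode
 * buffer size, so the active side can check that its MTU fits.
 */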
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.target_ack_delay = 20; /* FIXME */
        rep.srq = 1;
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
}

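/*
 * Passive connection setup: allocate per-connection state, create the
 * receive QP and bring it to RTR with a random starting PSN, send the REP,
 * and put the new connection on the LRU list watched by the stale task.
 */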
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct net_device *dev = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned long flags;
        unsigned psn;
        int ret;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->dev = dev;
        p->id = cm_id;
        p->qp = ipoib_cm_create_rx_qp(dev, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                goto err_qp;
        }

        psn = random32() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;

        ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
                goto err_rep;
        }

        cm_id->context = p;
        p->jiffies = jiffies;
        spin_lock_irqsave(&priv->lock, flags);
        list_add(&p->list, &priv->cm.passive_ids);
        spin_unlock_irqrestore(&priv->lock, flags);
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        return 0;

err_rep:
err_modify:
        ib_destroy_qp(p->qp);
err_qp:
        kfree(p);
        return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;
        unsigned long flags;
        int ret = 0;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                p = cm_id->context;
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = netdev_priv(p->dev);
                spin_lock_irqsave(&priv->lock, flags);
                if (list_empty(&p->list))
                        ret = 0; /* Connection is going away already. */
                else {
                        list_del_init(&p->list);
                        ret = -ECONNRESET;
                }
                spin_unlock_irqrestore(&priv->lock, flags);
                if (ret) {
                        ib_destroy_qp(p->qp);
                        kfree(p);
                }
                return 0;
        default:
                return 0;
        }
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
                          unsigned int length)
{
        unsigned int size;
        int i, num_frags;

        /* put header into skb */
        size = min(length, hdr_space);
        skb->tail += size;
        skb->len += size;
        length -= size;

        num_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < num_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                if (length == 0) {
                        /* don't need this page */
                        __free_page(frag->page);
                        --skb_shinfo(skb)->nr_frags;
                } else {
                        size = min(length, (unsigned) PAGE_SIZE);

                        frag->size = size;
                        skb->data_len += size;
                        skb->truesize += size;
                        skb->len += size;
                        length -= size;
                }
        }
}

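/*
 * Receive completion handler for connected mode.  A fresh buffer is
 * allocated before the received one is handed to the network stack, so the
 * ring slot can always be reposted; if allocation fails the packet is
 * dropped and the old buffer is reused.
 */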
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
        struct sk_buff *skb;
        struct ipoib_cm_rx *p;
        unsigned long flags;
        u64 mapping[IPOIB_CM_RX_SG];

        ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
                       wr_id, wc->opcode, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->cm.srq_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv, "cm recv error "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);
                ++priv->stats.rx_dropped;
                goto repost;
        }

        if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
                p = wc->qp->qp_context;
                if (time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        spin_lock_irqsave(&priv->lock, flags);
                        p->jiffies = jiffies;
                        /* Move this entry to list head, but do
                         * not re-add it if it has been removed. */
                        if (!list_empty(&p->list))
                                list_move(&p->list, &priv->cm.passive_ids);
                        spin_unlock_irqrestore(&priv->lock, flags);
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
                }
        }

        if (unlikely(ipoib_cm_alloc_rx_skb(dev, wr_id, mapping))) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                ++priv->stats.rx_dropped;
                goto repost;
        }

        ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[wr_id].mapping);
        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, sizeof mapping);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb->mac.raw = skb->data;
        skb_pull(skb, IPOIB_ENCAP_LEN);

        dev->last_rx = jiffies;
        ++priv->stats.rx_packets;
        priv->stats.rx_bytes += skb->len;

        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        skb->pkt_type = PACKET_HOST;
        netif_rx_ni(skb);

repost:
        if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_cm_post_receive failed "
                           "for buf %d\n", wr_id);
}

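/*
 * Transmit path: each skb is mapped as a single buffer and posted to the
 * per-connection RC QP; the ring index (tx_head modulo the send queue size)
 * is used as the work request ID.
 */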
static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
                            u64 addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge.addr   = addr;
        priv->tx_sge.length = len;

        priv->tx_wr.wr_id   = wr_id;

        return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        u64 addr;

        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, tx->mtu);
                ++priv->stats.tx_dropped;
                ++priv->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - INFINIBAND_ALEN);
                return;
        }

        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                ++priv->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        tx_req->mapping = addr;

        if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                               addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
                ++priv->stats.tx_errors;
                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
                ++tx->tx_head;

                if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
                        netif_stop_queue(dev);
                        set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
                }
        }
}

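/*
 * Send completion handler: unmap and free the skb, wake the netif queue if
 * it was stopped and the ring has drained below half, and tear down the
 * connection on a real (non-flush) error.
 */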
static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
                                  struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;
        unsigned long flags;

        ipoib_dbg_data(priv, "cm send completion: id %d, op %d, status: %d\n",
                       wr_id, wc->opcode, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &tx->tx_ring[wr_id];

        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++priv->stats.tx_packets;
        priv->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        spin_lock_irqsave(&priv->tx_lock, flags);
        ++tx->tx_tail;
        if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
            tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
                clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
                netif_wake_queue(dev);
        }

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_neigh *neigh;

                ipoib_dbg(priv, "failed cm send event "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);

                spin_lock(&priv->lock);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        list_del(&neigh->list);
                        if (neigh->ah)
                                ipoib_put_ah(neigh->ah);
                        ipoib_neigh_free(dev, neigh);

                        tx->neigh = NULL;
                }

                /* queue would be re-started anyway when TX is destroyed,
                 * but it makes sense to do it ASAP here. */
                if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
                        netif_wake_queue(dev);

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

                spin_unlock(&priv->lock);
        }

        spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
        struct ipoib_cm_tx *tx = tx_ptr;
        int n, i;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        do {
                n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
                for (i = 0; i < n; ++i)
                        ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
        } while (n == IPOIB_NUM_WC);
}

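/*
 * Listen for incoming connected-mode connections.  The CM service ID is the
 * IPoIB-CM IETF prefix with our datagram QP number in the low bits, matching
 * the service ID the active side builds in ipoib_cm_send_req().
 */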
int ipoib_cm_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                return PTR_ERR(priv->cm.id);
        }

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
                           0, NULL);
        if (ret) {
                printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
                       IPOIB_CM_IETF_ID | priv->qp->qp_num);
                ib_destroy_cm_id(priv->cm.id);
                return ret;
        }
        return 0;
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
        unsigned long flags;

        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return;

        ib_destroy_cm_id(priv->cm.id);
        spin_lock_irqsave(&priv->lock, flags);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_del_init(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                ib_destroy_cm_id(p->id);
                ib_destroy_qp(p->qp);
                kfree(p);
                spin_lock_irqsave(&priv->lock, flags);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        struct ipoib_cm_data *data = event->private_data;
        struct sk_buff_head skqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct sk_buff *skb;
        unsigned long flags;

        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
                           p->mtu, priv->dev->mtu);
                return -EINVAL;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return ret;
        }

        skb_queue_head_init(&skqueue);

        spin_lock_irqsave(&priv->lock, flags);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        if (ret) {
                ipoib_warn(priv, "failed to send RTU: %d\n", ret);
                return ret;
        }
        return 0;
}

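/*
 * Active (transmit-side) connection setup helpers follow: a dedicated send
 * CQ and RC QP per connection, a REQ whose private data carries our datagram
 * QPN and buffer size, and the usual transition of the QP to INIT.
 */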
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {};

        attr.recv_cq = priv->cq;
        attr.srq = priv->cm.srq;
        attr.cap.max_send_wr = ipoib_sendq_size;
        attr.cap.max_send_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = cq;

        return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

        req.primary_path     = pathrec;
        req.alternate_path   = NULL;
        req.service_id       = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num           = qp->qp_num;
        req.qp_type          = qp->qp_type;
        req.private_data     = &data;
        req.private_data_len = sizeof data;
        req.flow_control     = 0;

        req.starting_psn     = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources        = 4;
        req.remote_cm_response_timeout = 20;
        req.local_cm_response_timeout  = 20;
        req.retry_count                = 0; /* RFC draft warns against retries */
        req.rnr_retry_count            = 0; /* RFC draft warns against retries */
        req.max_cm_retries             = 15;
        req.srq                        = 1;
        return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
                                   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
        if (ret) {
                ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = priv->port;
        qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
                return ret;
        }
        return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        int ret;

        p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
                             GFP_KERNEL);
        if (!p->tx_ring) {
                ipoib_warn(priv, "failed to allocate tx ring\n");
                ret = -ENOMEM;
                goto err_tx;
        }

        p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
                             ipoib_sendq_size + 1);
        if (IS_ERR(p->cq)) {
                ret = PTR_ERR(p->cq);
                ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
                goto err_cq;
        }

        ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
                goto err_req_notify;
        }

        p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
                goto err_qp;
        }

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
                goto err_id;
        }

        ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
                goto err_modify;
        }

        ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
        if (ret) {
                ipoib_warn(priv, "failed to send cm req: %d\n", ret);
                goto err_send_cm;
        }

        ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
                  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

        return 0;

err_send_cm:
err_modify:
        ib_destroy_cm_id(p->id);
        p->id = NULL;
err_id:
        ib_destroy_qp(p->qp);
        p->qp = NULL;
err_qp:
err_req_notify:
        ib_destroy_cq(p->cq);
        p->cq = NULL;
err_cq:
err_tx:
        return ret;
}

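/*
 * Tear down an active connection: destroy the CM ID, QP and CQ, and unmap
 * and free any skbs still outstanding in the transmit ring.
 */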
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        if (p->id)
                ib_destroy_cm_id(p->id);

        if (p->qp)
                ib_destroy_qp(p->qp);

        if (p->cq)
                ib_destroy_cq(p->cq);

        if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
                netif_wake_queue(p->dev);

        if (p->tx_ring) {
                while ((int) p->tx_tail - (int) p->tx_head < 0) {
                        tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
                                            DMA_TO_DEVICE);
                        dev_kfree_skb_any(tx_req->skb);
                        ++p->tx_tail;
                }

                kfree(p->tx_ring);
        }

        kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        unsigned long flags;
        int ret;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                if (ret)
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                break;
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                spin_lock_irqsave(&priv->tx_lock, flags);
                spin_lock(&priv->lock);
                neigh = tx->neigh;

                if (neigh) {
                        neigh->cm = NULL;
                        list_del(&neigh->list);
                        if (neigh->ah)
                                ipoib_put_ah(neigh->ah);
                        ipoib_neigh_free(dev, neigh);

                        tx->neigh = NULL;
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                spin_unlock(&priv->lock);
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                break;
        default:
                break;
        }

        return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
                                       struct ipoib_neigh *neigh)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);
        if (!tx)
                return NULL;

        neigh->cm = tx;
        tx->neigh = neigh;
        tx->path = path;
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                list_move(&tx->list, &priv->cm.reap_list);
                queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
                          IPOIB_GID_ARG(tx->neigh->dgid));
        }
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
        struct ipoib_cm_tx *p;
        unsigned long flags;
        int ret;

        struct ib_sa_path_rec pathrec;
        u32 qpn;

        spin_lock_irqsave(&priv->tx_lock, flags);
        spin_lock(&priv->lock);
        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                neigh = p->neigh;
                qpn = IPOIB_QPN(neigh->neighbour->ha);
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
                spin_unlock(&priv->lock);
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                ret = ipoib_cm_tx_init(p, qpn, &pathrec);
                spin_lock_irqsave(&priv->tx_lock, flags);
                spin_lock(&priv->lock);
                if (ret) {
                        neigh = p->neigh;
                        if (neigh) {
                                neigh->cm = NULL;
                                list_del(&neigh->list);
                                if (neigh->ah)
                                        ipoib_put_ah(neigh->ah);
                                ipoib_neigh_free(dev, neigh);
                        }
                        list_del(&p->list);
                        kfree(p);
                }
        }
        spin_unlock(&priv->lock);
        spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct ipoib_cm_tx *p;
        unsigned long flags;

        spin_lock_irqsave(&priv->tx_lock, flags);
        spin_lock(&priv->lock);
        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del(&p->list);
                spin_unlock(&priv->lock);
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                ipoib_cm_tx_destroy(p);
                spin_lock_irqsave(&priv->tx_lock, flags);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
        spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.skb_task);
        struct net_device *dev = priv->dev;
        struct sk_buff *skb;
        unsigned long flags;

        unsigned mtu = priv->mcast_mtu;

        spin_lock_irqsave(&priv->tx_lock, flags);
        spin_lock(&priv->lock);
        while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                spin_unlock(&priv->lock);
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
                dev_kfree_skb_any(skb);
                spin_lock_irqsave(&priv->tx_lock, flags);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
        spin_unlock_irqrestore(&priv->tx_lock, flags);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
                           unsigned int mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int e = skb_queue_empty(&priv->cm.skb_queue);

        if (skb->dst)
                skb->dst->ops->update_pmtu(skb->dst, mtu);

        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
                queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* List is sorted by LRU, start from tail,
                 * stop when we see a recently used entry */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_del_init(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                ib_destroy_cm_id(p->id);
                ib_destroy_qp(p->qp);
                kfree(p);
                spin_lock_irqsave(&priv->lock, flags);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
}

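/*
 * The "mode" sysfs attribute switches the interface between datagram and
 * connected mode; switching flushes the path cache so connections are
 * re-established with the new setting.
 */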
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                         char *buf)
{
        struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
                return sprintf(buf, "connected\n");
        else
                return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct net_device *dev = to_net_dev(d);
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* flush paths if we switch modes so that connections are restarted */
        if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
                set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
                ipoib_warn(priv, "enabling connected mode "
                           "will cause multicast packet drops\n");
                ipoib_flush_paths(dev);
                return count;
        }

        if (!strcmp(buf, "datagram\n")) {
                clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
                dev->mtu = min(priv->mcast_mtu, dev->mtu);
                ipoib_flush_paths(dev);
                return count;
        }

        return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUGO | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
        return device_create_file(&dev->dev, &dev_attr_mode);
}

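/*
 * Per-device connected-mode initialization: set up the work queue items,
 * create the SRQ shared by all receive QPs, and fill it with receive
 * buffers.  Setting IPOIB_FLAGS_RC in the first byte of the hardware
 * address is what IPOIB_CM_SUPPORTED() tests for.
 */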
int ipoib_cm_dev_init(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_srq_init_attr srq_init_attr = {
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = IPOIB_CM_RX_SG
                }
        };
        int ret, i;

        INIT_LIST_HEAD(&priv->cm.passive_ids);
        INIT_LIST_HEAD(&priv->cm.reap_list);
        INIT_LIST_HEAD(&priv->cm.start_list);
        INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
        INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
        INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
        INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

        skb_queue_head_init(&priv->cm.skb_queue);

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                ret = PTR_ERR(priv->cm.srq);
                priv->cm.srq = NULL;
                return ret;
        }

        priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
                                    GFP_KERNEL);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                ipoib_cm_dev_cleanup(dev);
                return -ENOMEM;
        }

        for (i = 0; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].lkey = priv->mr->lkey;

        priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
        for (i = 1; i < IPOIB_CM_RX_SG; ++i)
                priv->cm.rx_sge[i].length = PAGE_SIZE;
        priv->cm.rx_wr.next = NULL;
        priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
        priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (ipoib_cm_alloc_rx_skb(dev, i, priv->cm.srq_ring[i].mapping)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ipoib_cm_dev_cleanup(dev);
                        return -ENOMEM;
                }
                if (ipoib_cm_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_cm_post_receive failed for buf %d\n", i);
                        ipoib_cm_dev_cleanup(dev);
                        return -EIO;
                }
        }

        priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
        return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, ret;

        if (!priv->cm.srq)
                return;

        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
                ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->cm.srq_ring[i].skb) {
                        ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[i].mapping);
                        dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
                        priv->cm.srq_ring[i].skb = NULL;
                }

        kfree(priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;
}