/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
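/*
 * IPoIB connected mode (CM) support: RC connections are set up through the
 * IB CM, receives for all passive connections are posted to a single shared
 * receive queue (SRQ), and each active connection has its own send queue.
 */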
#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff
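/*
 * Reserved WR ID for the "drain" send WR.  It lies outside the
 * 0..ipoib_recvq_size-1 range used for SRQ receives, so the receive
 * completion handler can recognize its flush completion and treat it as a
 * marker that the receives of the QPs on the drain list have completed.
 */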
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
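/* Repost receive buffer 'id' to the shared receive queue; on failure the
 * buffer is unmapped and freed so the ring slot is not leaked. */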
static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}
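/* Allocate a receive skb for SRQ slot 'id': a linear head of
 * IPOIB_CM_HEAD_SIZE bytes plus 'frags' full pages, DMA-mapped into
 * 'mapping'.  Returns NULL (after undoing partial mappings) on failure. */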
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	priv->cm.srq_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in the CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state.  This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->cq, /* For drain WR */
		.recv_cq = priv->cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	return ib_create_qp(priv->pd, &attr);
}
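/* Bring the passive-side RC QP through INIT -> RTR -> RTS, using the
 * attributes supplied by the CM and the receive PSN chosen by the caller. */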
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first.  This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}
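/* Send the CM REP: the private data carries our datagram QPN and the CM
 * receive buffer size we can accept, so the peer can identify us and size
 * its sends accordingly. */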
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.target_ack_delay = 20; /* FIXME */
	rep.srq = 1;
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to the passive_ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb_put(skb, size);
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
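/* Receive completion handler for connected mode.  Drain-marker completions
 * move QPs from the drain list to the reap list; normal completions hand
 * the skb to the stack and replace it with a freshly allocated buffer,
 * reusing the old buffer if the allocation fails. */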
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~IPOIB_CM_OP_SRQ)) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->cm.srq_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++priv->stats.rx_dropped;
		goto repost;
	}

	if (!likely(wr_id & IPOIB_CM_RX_UPDATE_MASK)) {
		p = wc->qp->qp_context;
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to the list head, but do not re-add it
			 * if it has been moved out of the list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++priv->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++priv->stats.rx_packets;
	priv->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_cm_post_receive failed "
			   "for buf %d\n", wr_id);
}
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr = addr;
	priv->tx_sge.length = len;

	priv->tx_wr.wr_id = wr_id;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
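/* Transmit one skb on a connected-mode QP: a single-SGE send whose ring
 * slot is chosen by tx_head; the netif queue is stopped when the ring
 * fills and re-enabled from the send completion handler. */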
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		}
	}
}
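/* Send completion handler: release the DMA mapping and skb, advance
 * tx_tail, and on a hard (non-flush) error tear down the connection's
 * neighbour state and queue the connection for reaping. */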
static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
				  struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++priv->stats.tx_packets;
	priv->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
	    tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		netif_wake_queue(dev);
	}

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		/* The queue would be re-started anyway when the TX object is
		 * destroyed, but it makes sense to do it ASAP here. */
		if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
			netif_wake_queue(dev);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}
static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
	struct ipoib_cm_tx *tx = tx_ptr;
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}
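/* Start listening for connection requests.  The service ID combines the
 * IPoIB-CM IETF prefix with our UD QP number, matching what
 * ipoib_cm_send_req() puts into its REQs. */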
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}
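/* Tear down connected-mode state: stop listening, move all passive
 * connections to the error state, wait (bounded) for their receives to
 * drain, then destroy the remaining CM IDs and QPs. */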
void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p, *n;
	unsigned long begin;
	LIST_HEAD(list);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list, &list);
			list_splice_init(&priv->cm.rx_error_list, &list);
			list_splice_init(&priv->cm.rx_drain_list, &list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		spin_lock_irq(&priv->lock);
	}

	list_splice_init(&priv->cm.rx_reap_list, &list);

	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(p, n, &list, list) {
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
	}

	cancel_delayed_work(&priv->cm.stale_task);
}
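/* Active-side REP handling: validate the MTU advertised in the private
 * data, move our RC QP to RTR and then RTS, flush any packets queued on
 * the neighbour while the connection was being set up, and send the RTU. */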
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {};
	attr.recv_cq = priv->cq;
	attr.srq = priv->cm.srq;
	attr.cap.max_send_wr = ipoib_sendq_size;
	attr.cap.max_send_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;

	return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = 1;
	return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			     GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
			     ipoib_sendq_size + 1, 0);
	if (IS_ERR(p->cq)) {
		ret = PTR_ERR(p->cq);
		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
		goto err_cq;
	}

	ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
		goto err_req_notify;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
err_req_notify:
	ib_destroy_cq(p->cq);
err_cq:
	p->cq = NULL;
err_tx:
	return ret;
}
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->qp)
		ib_destroy_qp(p->qp);

	if (p->cq)
		ib_destroy_cq(p->cq);

	if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
		netif_wake_queue(p->dev);

	if (p->tx_ring) {
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
			ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
					    DMA_TO_DEVICE);
			dev_kfree_skb_any(tx_req->skb);
			++p->tx_tail;
		}

		kfree(p->tx_ring);
	}

	kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}
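/* Allocated with GFP_ATOMIC, presumably because this runs in the transmit
 * path; only the bookkeeping happens here, and the actual CM/QP setup is
 * deferred to cm.start_task (ipoib_cm_tx_start). */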
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
		tx->neigh = NULL;
	}
}
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.rx_reap_task);
	struct ipoib_cm_rx *p, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(p, n, &list, list) {
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
	}
}
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
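/*
 * sysfs "mode" attribute: reads back "connected" or "datagram" and lets the
 * administrator switch modes, e.g. (interface name is just an example)
 *
 *     echo connected > /sys/class/net/ib0/mode
 */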
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);
		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}
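/* Per-device connected-mode init: create the SRQ and its receive ring, set
 * up the shared receive WR/SGE templates, and pre-post one buffer per ring
 * slot.  Setting IPOIB_FLAGS_RC in the hardware address is what
 * IPOIB_CM_SUPPORTED() tests, advertising connected-mode support to peers. */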
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = IPOIB_CM_RX_SG
		}
	};
	int ret, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		ret = PTR_ERR(priv->cm.srq);
		priv->cm.srq = NULL;
		return ret;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ipoib_cm_dev_cleanup(dev);
		return -ENOMEM;
	}

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
					   priv->cm.srq_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -ENOMEM;
		}
		if (ipoib_cm_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_cm_post_receive failed for buf %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -EIO;
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;
	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->cm.srq_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      priv->cm.srq_ring[i].mapping);
			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
			priv->cm.srq_ring[i].skb = NULL;
		}
	kfree(priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}