/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "ipoib.h"
int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
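
/* Repost a single receive buffer to the shared receive queue (SRQ path). */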
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}
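
/* Repost a receive buffer on a per-connection RQ when no SRQ is available. */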
static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}
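
/* Allocate and DMA-map a receive skb: one head buffer plus "frags" full pages. */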
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	kfree(rx_ring);
}
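
/*
 * Post a single drain WR on a QP that is already in the error state; its
 * flush-error completion tells us all earlier receive completions have
 * been reaped from the CQ.
 */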
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
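
/* QP event handler: on LAST_WQE_REACHED, queue the connection for draining. */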
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->cq, /* For drain WR */
		.recv_cq = priv->cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}
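
/* Walk the passive-side QP through INIT -> RTR -> RTS using CM-supplied attributes. */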
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;
	int i;

	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring)
		return -ENOMEM;

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}
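
/* Build and send the connection REP, advertising our datagram QPN and CM buffer size. */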
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb_put(skb, size);
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
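
/* Receive completion handler for connected mode. */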
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p, wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}
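
/* Post a single-SGE send WR on the connection's RC QP. */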
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr	= addr;
	priv->tx_sge[0].length	= len;

	priv->tx_wr.num_sge	= 1;
	priv->tx_wr.wr_id	= wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping[0] = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
		}
	}
}
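
/* Send completion handler for connected mode. */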
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}
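
/* Tear down all passive connections queued on the rx_reap_list. */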
static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}
void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}
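
/* Create the active-side RC QP used for sending on a connection. */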
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq		= priv->cq,
		.recv_cq		= priv->cq,
		.srq			= priv->cm.srq,
		.cap.max_send_wr	= ipoib_sendq_size,
		.cap.max_send_sge	= 1,
		.sq_sig_type		= IB_SIGNAL_ALL_WR,
		.qp_type		= IB_QPT_RC,
		.qp_context		= tx
	};

	return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof data;
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}
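
/* Bring up the active side: allocate the TX ring and QP, create a CM ID and send the REQ. */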
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
err_tx:
	return ret;
}
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		spin_lock_irqsave(&priv->tx_lock, flags);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	if (p->tx_ring)
		vfree(p->tx_ring);
	kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
	}
}
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}
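
/* Periodically retire passive connections that have been idle past IPOIB_CM_RX_TIMEOUT. */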
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
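
/* sysfs "mode" attribute: switch the interface between datagram and connected mode. */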
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");

		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);

		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
			if (priv->hca_caps & IB_DEVICE_UD_TSO)
				dev->features |= NETIF_F_TSO;
		}

		return count;
	}

	return -EINVAL;
}
static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
	}
}
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags  = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags  = IPOIB_CM_RX_SG;
	}

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = priv->cm.num_frags;

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}