/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>

#include <linux/tcp.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        ah->ah = ib_create_ah(pd, attr);
        if (IS_ERR(ah->ah)) {
                kfree(ah);
                ah = NULL;
        } else
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

        return ah;
}
void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sge list;
        struct ib_recv_wr param;
        struct ib_recv_wr *bad_wr;
        int ret;

        list.addr     = priv->rx_ring[id].mapping;
        list.length   = IPOIB_BUF_SIZE;
        list.lkey     = priv->mr->lkey;

        param.next    = NULL;
        param.wr_id   = id | IPOIB_OP_RECV;
        param.sg_list = &list;
        param.num_sge = 1;

        ret = ib_post_recv(priv->qp, &param, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
                                    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}
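
/*
 * Note: receive WR IDs carry the ring index in their low bits with
 * IPOIB_OP_RECV OR'ed in, so a completion can be routed to the RX or
 * TX handler just by testing that bit (see ipoib_poll()).  For
 * example, slot 5 posts with wr_id = 5 | IPOIB_OP_RECV, and the RX
 * handler recovers the index with wc->wr_id & ~IPOIB_OP_RECV.
 */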
static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        u64 addr;

        skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
        if (!skb)
                return -ENOMEM;

        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
         * header.  So we need 4 more bytes to get to 48 and align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 4);

        addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
                                 DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        priv->rx_ring[id].skb     = skb;
        priv->rx_ring[id].mapping = addr;

        return 0;
}
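
/*
 * Worked example for the alignment above: skb_reserve(skb, 4) makes
 * skb->data start 4 bytes into the buffer; the HCA then writes the
 * 40-byte GRH followed by the 4-byte IPoIB encapsulation header, so
 * the IP header lands at offset 4 + 40 + 4 = 48, a multiple of 16.
 * (This assumes the underlying buffer itself is at least 16-byte
 * aligned, which the skb allocator is expected to provide.)
 */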
static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 addr;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb  = priv->rx_ring[wr_id].skb;
        addr = priv->rx_ring[wr_id].mapping;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ib_dma_unmap_single(priv->ca, addr,
                                    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        /*
         * Drop packets that this interface sent, ie multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);

        skb_put(skb, wc->byte_len);
        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);

        dev->last_rx = jiffies;
        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
        skb->pkt_type = PACKET_HOST;

        if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        netif_receive_skb(skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                           "for buf %d\n", wr_id);
}
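
/*
 * Note on buffer reuse above: when ipoib_alloc_rx_skb() fails, the old
 * skb is left in rx_ring[wr_id] and simply reposted, so the receive
 * ring never shrinks under memory pressure -- the only cost is the
 * dropped packet.
 */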
static int ipoib_dma_map_tx(struct ib_device *ca,
                            struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca, frag->page,
                                                   frag->page_offset, frag->size,
                                                   DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
                ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

        return -EIO;
}
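
/*
 * Unwind indexing sketch for ipoib_dma_map_tx(): with a linear head,
 * off == 1 and fragment i is mapped at mapping[i + 1]; with no head,
 * off == 0 and fragment i sits at mapping[i].  On a partial failure at
 * fragment i, the loop above walks i back down to 1 and unmaps
 * mapping[i - !off], i.e. mapping[i] when off == 1 and mapping[i - 1]
 * when off == 0 -- exactly the entries filled for fragments 0 .. i-1.
 */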
static void ipoib_dma_unmap_tx(struct ib_device *ca,
                               struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                ib_dma_unmap_page(ca, mapping[i + off], frag->size,
                                  DMA_TO_DEVICE);
        }
}
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;
        unsigned long flags;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv->ca, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        spin_lock_irqsave(&priv->tx_lock, flags);
        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR)
                ipoib_warn(priv, "failed send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
}
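
/*
 * Note: the queue is only woken once tx_outstanding drops back to half
 * of ipoib_sendq_size.  Waking at exactly "one slot free" would bounce
 * the queue between stopped and started on every completion; the
 * half-full threshold adds hysteresis.
 */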
int ipoib_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
        struct net_device *dev = priv->dev;
        int done;
        int t;
        int n, i;

        done  = 0;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else {
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_tx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_tx_wc(dev, wc);
                        }
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                netif_rx_complete(dev, napi);
                if (unlikely(ib_req_notify_cq(priv->cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    netif_rx_reschedule(dev, napi))
                        goto poll_more;
        }

        return done;
}
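
/*
 * Rearm race note for ipoib_poll(): ib_req_notify_cq() with
 * IB_CQ_REPORT_MISSED_EVENTS returns nonzero if completions arrived
 * between the final poll and rearming the CQ.  No interrupt will fire
 * for those completions, so the poll loop is re-entered via
 * netif_rx_reschedule() instead of waiting for an event that may
 * never come.
 */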
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        netif_rx_schedule(dev, &priv->napi);
}
static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct ib_send_wr *bad_wr;
        int i, off;
        struct sk_buff *skb = tx_req->skb;
        skb_frag_t *frags = skb_shinfo(skb)->frags;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        u64 *mapping = tx_req->mapping;

        if (skb_headlen(skb)) {
                priv->tx_sge[0].addr   = mapping[0];
                priv->tx_sge[0].length = skb_headlen(skb);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < nr_frags; ++i) {
                priv->tx_sge[i + off].addr   = mapping[i + off];
                priv->tx_sge[i + off].length = frags[i].size;
        }
        priv->tx_wr.num_sge          = nr_frags + off;
        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;

        if (head) {
                priv->tx_wr.wr.ud.mss    = skb_shinfo(skb)->gso_size;
                priv->tx_wr.wr.ud.header = head;
                priv->tx_wr.wr.ud.hlen   = hlen;
                priv->tx_wr.opcode       = IB_WR_LSO;
        } else
                priv->tx_wr.opcode       = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
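
/*
 * Note: post_send() picks the opcode from "head": for GSO skbs,
 * ipoib_send() strips the TCP/IP headers and passes them separately
 * (head/hlen), and IB_WR_LSO lets the HCA replicate those headers for
 * each segment; otherwise a plain IB_WR_SEND is posted with head NULL.
 */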
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen;
        void *phead;

        if (skb_is_gso(skb)) {
                hlen  = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return;
                }
                phead = NULL;
                hlen  = 0;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                               address->ah, qpn, tx_req, phead, hlen))) {
                ipoib_warn(priv, "post_send failed\n");
                ++dev->stats.tx_errors;
                ipoib_dma_unmap_tx(priv->ca, tx_req);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;

                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                        netif_stop_queue(dev);
                }
        }
}
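
/*
 * Ring indexing note: tx_head and tx_tail are free-running counters,
 * and slots are addressed with tx_head & (ipoib_sendq_size - 1).  This
 * assumes ipoib_sendq_size is a power of two (e.g. 128 gives mask
 * 0x7f); head == tail then means "ring empty" without ever reducing
 * the counters modulo the ring size.
 */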
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);

        spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);
        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }
        spin_unlock(&priv->lock);
        spin_unlock_irq(&priv->tx_lock);
}
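
/*
 * Note: the reaper compares (int) priv->tx_tail - (int) ah->last_send
 * rather than the raw values so the test keeps working when the
 * free-running counters wrap around: an AH is only destroyed once
 * every send posted while it was live has completed.
 */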
void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}
int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
                ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                return -1;
        }
        set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                ipoib_ib_dev_stop(dev, 1);
                return -1;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                ipoib_ib_dev_stop(dev, 1);
                return -1;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        return 0;
}
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        u16 pkey_index = 0;

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}
int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        /* Shutdown the P_Key thread if still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_poll_task);
                mutex_unlock(&pkey_mutex);
                if (flush)
                        flush_workqueue(ipoib_workqueue);
        }

        ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}
static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}
void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;
        do {
                n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
                        }
                }
        } while (n == IPOIB_NUM_WC);
}
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize it
         * when all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv->ca, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ib_dma_unmap_single(priv->ca,
                                                    rx_req->mapping,
                                                    IPOIB_BUF_SIZE,
                                                    DMA_FROM_DEVICE);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        if (flush)
                flush_workqueue(ipoib_workqueue);

        begin = jiffies;

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(dev);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");
                        break;
                }

                msleep(1);
        }

        ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);

        return 0;
}
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca   = ca;
        priv->port = port;
        priv->qp   = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        u16 new_index;

        mutex_lock(&priv->vlan_mutex);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, pkey_event);

        mutex_unlock(&priv->vlan_mutex);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (pkey_event) {
                if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
                        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                        ipoib_ib_dev_down(dev, 0);
                        ipoib_ib_dev_stop(dev, 0);
                        if (ipoib_pkey_dev_delay_open(dev))
                                return;
                }

                /* restart QP only if P_Key index is changed */
                if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
                    new_index == priv->pkey_index) {
                        ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                        return;
                }
                priv->pkey_index = new_index;
        }

        ipoib_dbg(priv, "flushing\n");

        ipoib_ib_dev_down(dev, 0);

        if (pkey_event) {
                ipoib_ib_dev_stop(dev, 0);
                ipoib_ib_dev_open(dev);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_ib_dev_up(dev);
                ipoib_mcast_restart_task(&priv->restart_task);
        }
}
void ipoib_ib_dev_flush(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_task);

        ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
        __ipoib_ib_dev_flush(priv, 0);
}
void ipoib_pkey_event(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, pkey_event_task);

        ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
        __ipoib_ib_dev_flush(priv, 1);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);

        ipoib_transport_dev_cleanup(dev);
}
/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */
void ipoib_pkey_poll(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
        struct net_device *dev = priv->dev;

        ipoib_pkey_dev_check_presence(dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                ipoib_open(dev);
        else {
                mutex_lock(&pkey_mutex);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->pkey_poll_task,
                                           HZ);
                mutex_unlock(&pkey_mutex);
        }
}
int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Look for the interface pkey value in the IB Port P_Key table and */
        /* set the interface pkey assignment flag                           */
        ipoib_pkey_dev_check_presence(dev);

        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                                   &priv->pkey_poll_task,
                                   HZ);
                mutex_unlock(&pkey_mutex);
                return 1;
        }

        return 0;
}