/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");

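/*
 * Usage sketch for the two parameters above (hypothetical invocation,
 * assuming the module is built as ib_ipoib, as in the in-tree Makefile):
 *
 *   modprobe ib_ipoib send_queue_size=128 recv_queue_size=256
 *
 * Both values are rounded up to a power of two and clamped to
 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE] in ipoib_init_module()
 * below.
 */
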
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

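/*
 * debug_level is writable at runtime (mode 0644); assuming the usual
 * module parameter sysfs layout, something like the following would
 * toggle it (sketch, not from this file):
 *
 *   echo 1 > /sys/module/ib_ipoib/parameters/debug_level
 */
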
struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

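/*
 * Layout sketch of the 20-byte (INFINIBAND_ALEN) hardware address that
 * ipv4_bcast_addr above instantiates, as implied by its uses in this
 * file:
 *
 *   ha[0..3]   QPN field (0x00ffffff above, the broadcast QPN) --
 *              decoded by IPOIB_QPN()
 *   ha[4..19]  port GID -- hence the "ha + 4" in the path lookups below
 *   ha[8..9]   P_Key bytes within a multicast GID, patched in by
 *              ipoib_path_lookup() and ipoib_start_xmit()
 */
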
struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	napi_enable(&priv->napi);
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev)) {
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev, 1);
		napi_disable(&priv->napi);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
	napi_disable(&priv->napi);

	netif_stop_queue(dev);

	clear_bit(IPOIB_FLAG_NETIF_STOPPED, &priv->flags);

	/*
	 * Now flush workqueue to make sure a scheduled task doesn't
	 * bring our internal state back up.
	 */
	flush_workqueue(ipoib_workqueue);

	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev, 1);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev) && new_mtu <= IPOIB_CM_MTU) {
		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);
		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) {
		return -EINVAL;
	}

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

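/*
 * Worked example: with the usual constants (IPOIB_PACKET_SIZE 2048,
 * IPOIB_ENCAP_LEN 4), the datagram-mode limit is 2044 bytes, so
 * "ip link set ib0 mtu 4096" returns -EINVAL unless connected mode was
 * first enabled, e.g. (per Documentation/infiniband/ipoib.txt; the
 * interface name is hypothetical):
 *
 *   echo connected > /sys/class/net/ib0/mode
 *   ip link set ib0 mtu 4096
 */
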
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		wait_for_completion(&path->done);
		path_free(dev, path);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;
			memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
			       sizeof(union ib_gid));

			if (ipoib_cm_enabled(dev, neigh->neighbour)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					list_del(&neigh->list);
					if (neigh->ah)
						ipoib_put_ah(neigh->ah);
					ipoib_neigh_free(dev, neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	= priv->local_gid;
	path->pathrec.pkey	= cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}

static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, skb->dst->neighbour->ha + 4);
	if (!path) {
		path = path_rec_create(dev, skb->dst->neighbour->ha + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;
		memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
		       sizeof(union ib_gid));

		if (ipoib_cm_enabled(dev, neigh->neighbour)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
				__skb_queue_tail(&neigh->queue, skb);
			else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_list;

		__skb_queue_tail(&neigh->queue, skb);
	}

	spin_unlock(&priv->lock);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(dev, neigh);
err_drop:
	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, skb->dst->neighbour->ha + 4, skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path) {
		path = path_rec_create(dev, phdr->hwaddr + 4);
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}

static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
		return NETDEV_TX_LOCKED;

	/*
	 * Check if our queue is stopped.  Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (likely(skb->dst && skb->dst->neighbour)) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (ipoib_cm_get(neigh)) {
			if (ipoib_cm_up(neigh)) {
				ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
				goto out;
			}
		} else if (neigh->ah) {
			if (unlikely(memcmp(&neigh->dgid.raw,
					    skb->dst->neighbour->ha + 4,
					    sizeof(union ib_gid)))) {
				spin_lock(&priv->lock);
				/*
				 * It's safe to call ipoib_put_ah() inside
				 * priv->lock here, because we know that
				 * path->ah will always hold one more reference,
				 * so ipoib_put_ah() will never do more than
				 * decrement the ref count.
				 */
				ipoib_put_ah(neigh->ah);
				list_del(&neigh->list);
				ipoib_neigh_free(dev, neigh);
				spin_unlock(&priv->lock);
				ipoib_path_lookup(skb, dev);
				goto out;
			}

			ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   IPOIB_QPN(phdr->hwaddr),
					   IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     void *daddr, void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_cleanup(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	ipoib_dbg(priv,
		  "neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
		  IPOIB_QPN(n->ha),
		  IPOIB_GID_RAW_ARG(n->ha + 4));

	spin_lock_irqsave(&priv->lock, flags);

	neigh = *to_ipoib_neigh(n);
	if (neigh) {
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		ipoib_neigh_free(n->dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	*to_ipoib_neigh(neighbour) = neigh;
	skb_queue_head_init(&neigh->queue);
	ipoib_cm_set(neigh, NULL);

	return neigh;
}

void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;

	*to_ipoib_neigh(neigh->neighbour) = NULL;
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_cleanup = ipoib_neigh_cleanup;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, ipoib_recvq_size);
		goto out;
	}

	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open		 = ipoib_open;
	dev->stop		 = ipoib_stop;
	dev->change_mtu		 = ipoib_change_mtu;
	dev->hard_start_xmit	 = ipoib_start_xmit;
	dev->get_stats		 = ipoib_get_stats;
	dev->tx_timeout		 = ipoib_timeout;
	dev->hard_header	 = ipoib_hard_header;
	dev->set_multicast_list	 = ipoib_set_mcast_list;
	dev->neigh_setup	 = ipoib_neigh_setup_dev;

	netif_napi_add(dev, &priv->napi, ipoib_poll, 100);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len	 = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu		 = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
	INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
	INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
	INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

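/*
 * Usage sketch for the two attributes above (per
 * Documentation/infiniband/ipoib.txt; interface name and P_Key value
 * are hypothetical):
 *
 *   echo 0x8001 > /sys/class/net/ib0/create_child   # creates ib0.8001
 *   echo 0x8001 > /sys/class/net/ib0/delete_child
 */
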
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;
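
	/*
	 * Worked example: ib_query_pkey() returning 0x7fff (limited
	 * membership) yields priv->pkey 0xffff once the bit is set,
	 * so broadcast[8] = 0xff and broadcast[9] = 0xff -- matching
	 * the all-ones P_Key in ipv4_bcast_addr above.
	 */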

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}
	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_scheduled_work();

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_scheduled_work();

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
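
	/*
	 * Worked example: a module load with recv_queue_size=200 becomes
	 * 256 (next power of two), then is clamped into
	 * [IPOIB_MIN_QUEUE_SIZE, IPOIB_MAX_QUEUE_SIZE].
	 */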

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	return 0;

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);