/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the MAC address of the guest OS interface in
 *	a virtualization environment.
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *
 * max_config_vpath:
 *	Maximum number of VPATHs configured for each device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	Maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 ******************************************************************************/
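/*
 * Editor's note -- illustrative, not part of the original source: the
 * parameters above are ordinary module parameters (see the
 * VXGE_MODULE_PARAM_INT declarations below), so a hypothetical load line
 * would look like
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 *
 * The values shown are examples only.
 */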
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "vxge-main.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");
static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};

static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);
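/*
 * Editor's note (assumption, not original text): each vpath_selector[n - 1]
 * entry is the largest (2^k - 1) bitmask not exceeding n; vxge_get_vpath_no()
 * below applies it to a TCP port sum to pick one of the n open vpaths.  The
 * GNU designated-initializer range in bw_percentage simply presets every
 * element to 0xFF.
 */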
static struct vxge_drv_config *driver_config;
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac);
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac);
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}
static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}
/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}
/*
 * vxge_rx_map
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}
/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}
static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(ring->napi_p, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(ring->napi_p, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
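/*
 * Editor's note (assumption, not original text): vxge_post() batches
 * descriptor posting -- every VXGE_HW_RXSYNC_FREQ_CNT descriptors the
 * accumulated first descriptor is posted with the write-memory-barrier
 * variant, while the ones in between use the cheaper non-barrier post,
 * amortizing the ordering cost across the batch.
 */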
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return VXGE_HW_OK;
}
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NIC's
 * internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}
/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					(ip->ihl * 4));

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}
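/*
 * Editor's note (assumption, not original text): masking the TCP port sum
 * with a (2^k - 1) value from vpath_selector avoids a modulo on the hot
 * path; the follow-up range check clamps the rare case where the mask
 * still exceeds the number of open vpaths.
 */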
static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
				VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
				VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
				VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}
/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/*
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *) entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}
/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
*/
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}
/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
*/
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
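/*
 * Editor's note (assumption, not original text): the MSI-X vector
 * numbering used above is "vpath id * VXGE_HW_VPATH_MSIX_ACTIVE + offset",
 * with offset 0 for the fifo (Tx), 1 for the ring (Rx), and
 * VXGE_ALARM_MSIX_ID for the shared alarm vector -- matching the layout
 * built in vxge_alloc_msix() further below.
 */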
/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
*/
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			netif_tx_stop_all_queues(vdev->ndev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			netif_tx_stop_all_queues(vdev->ndev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		netif_tx_stop_all_queues(vdev->ndev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		netif_tx_wake_all_queues(vdev->ndev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}
/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
static int vxge_reset(struct vxgedev *vdev)
{
	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif
/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
				vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
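/*
 * Editor's note (assumption, not original text): the RTH setup above is
 * the usual RSS-style two-level lookup -- a frame hash selects one of the
 * (1 << rth_bkt_sz) buckets in itable, and mtable maps that bucket
 * round-robin onto the open vpaths.
 */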
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}
/* Add a mac address to DA table */
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
					struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
/* delete a mac address from DA table */
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
					struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}
/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
					struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}
/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status
vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {

		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);

			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}
/* reset vpaths */
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		if (vpath->handle) {
			if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vpath->handle) != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return status;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return status;
			}
		}
	}

	return status;
}
/* close vpaths */
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	struct vxge_vpath *vpath;
	int i;

	for (i = index; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		if (vpath->handle && vpath->is_open) {
			vxge_hw_vpath_close(vpath->handle);
			vdev->stats.vpaths_open--;
		}
		vpath->is_open = 0;
		vpath->handle = NULL;
	}
}
/* open vpaths */
static int vxge_open_vpaths(struct vxgedev *vdev)
{
	struct vxge_hw_vpath_attr attr;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath;
	u32 vp_id = 0;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_assert(vpath->is_configured);
		attr.vp_id = vpath->device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = &vpath->fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = &vpath->ring;

		vpath->ring.ndev = vdev->ndev;
		vpath->ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
		if (status == VXGE_HW_OK) {
			vpath->fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vpath->ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vpath->fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vpath->fifo.ndev = vdev->ndev;
			vpath->fifo.pdev = vdev->pdev;
			if (vdev->config.tx_steering_type)
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, i);
			else
				vpath->fifo.txq =
					netdev_get_tx_queue(vdev->ndev, 0);
			vpath->fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vpath->ring.rx_vector_no = 0;
			vpath->ring.rx_csum = vdev->rx_csum;

			vdev->vp_handles[i] = vpath->handle;
			vpath->ring.gro_enable = vdev->config.gro_enable;
			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vpath->device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id = vpath->handle->vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct net_device *dev;
	struct __vxge_hw_device *hldev;
	u64 reason;
	enum vxge_hw_status status;
	struct vxgedev *vdev = (struct vxgedev *)dev_id;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = vdev->ndev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}
static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}
static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int msix_id = (vpath->handle->vpath->vp_id *
		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {

			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
					msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}
static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int msix_intr_vect = 0, temp;
	vdev->intr_cnt = 0;

start:
	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
				GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_entries_failed;
	}

	vdev->vxge_entries = kcalloc(vdev->intr_cnt,
				     sizeof(struct vxge_msix_entry),
				     GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		ret = -ENOMEM;
		goto alloc_vxge_entries_failed;
	}

	for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {

		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
		if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
			ret = -ENODEV;
			goto enable_msix_failed;
		}

		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;

		/* Try with less no of vector by reducing no of vpaths count */
		temp = (ret - 1) / 2;
		vxge_close_vpaths(vdev, temp);
		vdev->no_of_vpath = temp;
		goto start;
	} else if (ret < 0) {
		ret = -ENODEV;
		goto enable_msix_failed;
	}
	return 0;

enable_msix_failed:
	kfree(vdev->vxge_entries);
alloc_vxge_entries_failed:
	kfree(vdev->entries);
alloc_entries_failed:
	return ret;
}
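/*
 * vxge_enable_msix - switch the driver to MSI-X operation.
 * Allocates the vectors and programs each vpath with its Tx/Rx timer
 * vector ids and the shared alarm vector id.
 */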
static int vxge_enable_msix(struct vxgedev *vdev)
{
	int i, ret = 0;
	/* 0 - Tx, 1 - Rx */
	int tim_msix_id[4] = {0, 1, 0, 0};

	vdev->config.intr_type = MSI_X;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			struct vxge_vpath *vpath = &vdev->vpaths[i];

			/* If fifo or ring are not enabled, the MSIX vector for
			 * it should be set to 0.
			 */
			vpath->ring.rx_vector_no = (vpath->device_id *
						VXGE_HW_VPATH_MSIX_ACTIVE) + 1;

			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
					       VXGE_ALARM_MSIX_ID);
		}
	}

	return ret;
}
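/*
 * vxge_rem_msix_isr - tear down MSI-X interrupts.
 * Frees every vector that was actually requested, releases the entry
 * tables and disables MSI-X on the PCI device.
 */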
static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}
#endif
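/*
 * vxge_rem_isr - release whichever interrupt scheme is in use,
 * either the MSI-X vector set or the shared INTA line.
 */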
static void vxge_rem_isr(struct vxgedev *vdev)
{
	struct __vxge_hw_device *hldev;
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
	if (vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else
#endif
	if (vdev->config.intr_type == INTA) {
		synchronize_irq(vdev->pdev->irq);
		free_irq(vdev->pdev->irq, vdev);
	}
}
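/*
 * vxge_add_isr - register the driver's interrupt handlers.
 * MSI-X is tried first (a Tx and an Rx vector per vpath plus one alarm
 * vector); if registration fails at any point the driver falls back to
 * a shared INTA line.
 */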
static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		vxge_debug_init(VXGE_ERR,
			"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
		vdev->config.intr_type = INTA;
	}

	if (vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
					&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
					vdev->ndev->name,
					vdev->entries[intr_cnt].entry,
					pci_fun, vp_idx);
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
					&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA",
					vdev->ndev->name);
				goto INTA_MODE;
			}

			if (irq_req) {
				/* We requested for this msix interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				msix_idx += vdev->vpaths[vp_idx].device_id *
					VXGE_HW_VPATH_MSIX_ACTIVE;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					msix_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
			    (vp_idx < (vdev->no_of_vpath - 1)))
				vp_idx++;
		}

		intr_cnt = vdev->no_of_vpath * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge:MSI-X %d - Alarm - fn:%d",
			vdev->ndev->name,
			vdev->entries[intr_cnt].entry,
			pci_fun);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
				vxge_alarm_msix_handle, 0,
				vdev->desc[intr_cnt],
				&vdev->vpaths[0]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			vdev->config.intr_type = INTA;
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA",
				vdev->ndev->name);
			goto INTA_MODE;
		}

		msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					msix_idx);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
	}
INTA_MODE:
#endif

	if (vdev->config.intr_type == INTA) {
		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
			"%s:vxge:INTA", vdev->ndev->name);
		vxge_hw_device_set_intr_type(vdev->devh,
			VXGE_HW_INTR_MODE_IRQLINE);
		vxge_hw_vpath_tti_ci_set(vdev->devh,
			vdev->vpaths[0].device_id);
		ret = request_irq((int)vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], vdev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}
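/*
 * vxge_poll_vp_reset - timer callback servicing deferred vpath resets.
 * Resets every vpath flagged in vp_reset and re-arms itself to run
 * again in HZ/2.
 */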
static void vxge_poll_vp_reset(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
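/*
 * vxge_poll_vp_lockup - timer callback watching for stalled rings.
 * A ring that has stopped receiving while the hardware reports a leak
 * on two consecutive checks is scheduled for a vpath reset and its Tx
 * queue is stopped.
 */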
static void vxge_poll_vp_lockup(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	struct vxge_ring *ring;
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		/* Did this vpath receive any packets */
		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did it receive any packets last time */
			if ((VXGE_HW_FAIL == status) &&
			    (VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {
					vpath = &vdev->vpaths[i];

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					netif_tx_stop_queue(vpath->fifo.txq);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = ring->stats.rx_frms;
		ring->last_status = status;
	}

	/* Check every 1 millisecond */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int
vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	int ret = 0;
	int i;
	u64 val64, function_mode;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure you have link off by default every time Nic is
	 * initialized */
	netif_carrier_off(dev);

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vpath->ring.napi_p = &vdev->napi;
		}
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			netif_napi_add(dev, &vpath->ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vpath->ring.napi);
			vpath->ring.napi_p = &vpath->ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Restore the DA, VID table and also multicast and promiscuous mode
	 * states
	 */
	if (vdev->all_multi_flg) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_restore_vpath_mac_addr(vpath);
			vxge_restore_vpath_vid_table(vpath);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling multicast failed",
					__func__, __LINE__);
		}
	}

	/* Enable vpath to sniff all unicast/multicast traffic that is not
	 * addressed to them. We allow promiscuous mode for PF only
	 */

	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		status = vxge_hw_vpath_bcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ / 2));

	if (vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer,
			vxge_poll_vp_lockup, vdev, (HZ / 2));

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		netdev_notice(vdev->ndev, "Link Up\n");
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_hw_vpath_enable(vpath->handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	}

	netif_tx_start_all_queues(vdev->ndev);
	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...",
		dev->name, __func__, __LINE__);
	return ret;
}
/* Loop through the MAC address list and delete all the entries */
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}
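/* Remove the NAPI contexts that were registered by vxge_open(). */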
static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;

	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}
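/*
 * do_vxge_close - common close path for vxge_close() and error recovery.
 * When do_io is 0 all PCI I/O is skipped, which lets the PCI error
 * handlers bring the interface down without touching the bus.
 */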
static int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);

		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}
	del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	mdelay(1000);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}
/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int
vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}
/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d Exiting...", __func__, __LINE__);

	return 0;
}
/**
 * vxge_get_stats64
 * @dev: pointer to the device structure
 * @net_stats: pointer to struct rtnl_link_stats64
 *
 */
static struct rtnl_link_stats64 *
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int k;

	/* net_stats already zeroed by caller */
	for (k = 0; k < vdev->no_of_vpath; k++) {
		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
		net_stats->rx_dropped +=
			vdev->vpaths[k].ring.stats.rx_dropped;

		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
	}

	return net_stats;
}
/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *      a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *      can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void
vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	vxge_reset(vdev);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}
/**
 * vxge_vlan_rx_register
 * @dev: net device pointer.
 * @grp: vlan group
 *
 * Vlan group registration
 */
static void
vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp;
	u64 vid;
	enum vxge_hw_status status;
	int i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vpath = &vdev->vpaths[0];
	if ((NULL == grp) && (vpath->is_open)) {
		/* Get the first vlan */
		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);

		while (status == VXGE_HW_OK) {

			/* Delete this vlan from the vid table */
			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
				vpath = &vdev->vpaths[vp];
				if (!vpath->is_open)
					continue;

				vxge_hw_vpath_vid_delete(vpath->handle, vid);
			}

			/* Get the next vlan to be deleted */
			vpath = &vdev->vpaths[0];
			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
		}
	}

	vdev->vlgrp = grp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].is_configured)
			vdev->vpaths[i].ring.vlgrp = grp;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the devices vlan id table
 */
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vdev = (struct vxgedev *)netdev_priv(dev);

	/* Add these vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
}
/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vlan_group_set_device(vdev->vlgrp, vid, NULL);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats64        = vxge_get_stats64,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = vxge_set_multicast,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,
	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};
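/*
 * vxge_device_register - allocate and register the net_device.
 * Sets up the net_device, its feature flags and driver entry points,
 * and returns the private vxgedev structure through vdev_out.
 */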
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
					  struct vxge_config *config,
					  int high_dma, int no_of_vpath,
					  struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
			no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_csum = 1;	/* Enable Rx CSUM by default. */

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
				NETIF_F_HW_VLAN_FILTER;
	/* Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long)hldev->bar0;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;

	vxge_initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENODEV;
		goto _out1;
	}

	ndev->features |= NETIF_F_SG;

	ndev->features |= NETIF_F_HW_CSUM;
	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksuming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (vdev->config.gro_enable)
		ndev->features |= NETIF_F_GRO;

	if (register_netdev(ndev)) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		ret = -ENODEV;
		goto _out2;
	}

	/* Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;

	/* Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
		hldev,
		VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
		0,
		0,
		&stat);

	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;
_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}
/*
 * vxge_device_unregister
 *
 * This function will unregister and free network device
 */
static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	dev = hldev->ndev;
	vdev = netdev_priv(dev);
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	memcpy(buf, vdev->ndev->name, IFNAMSIZ);

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	flush_scheduled_work();

	vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d  Exiting...", buf, __func__, __LINE__);
}
/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
			enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
	struct vxge_vpath *vpath = NULL;
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->device_id == vp_id)
			break;
	}

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
		(type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		   (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				netif_tx_stop_queue(vpath->fifo.txq);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d  Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}
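/*
 * verify_bandwidth - sanity check the bw_percentage module parameter.
 * Falls back to an equal split across all vpaths when an entry is 0 or
 * the sum exceeds 100%, and spreads any leftover bandwidth over the
 * vpaths that were left unspecified.
 */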
static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs */
				band_width =
				  (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}
/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
		(max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPU. Return vpath number as zero.*/
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;
		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
				MAX_SKB_FRAGS + 1;
		device_config->vp_config[i].fifo.memblock_size =
			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = device_config->vp_config[i].fifo.max_frags *
				sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector
		has a handler of its own) -
		Set CI to OFF for all vpaths */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;
		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;
		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;
		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
				VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
				RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
				RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
				RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}
/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This Kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
		break;
	}
	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
			device_config->dma_blockpool_initial);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
			device_config->dma_blockpool_max);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
			device_config->rts_mac_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}
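/* Log the configuration the driver actually came up with. */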
static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.gro_enable) {
		vxge_debug_init(VXGE_ERR,
			"%s: Generic receive offload enabled",
			vdev->ndev->name);
	} else
		vxge_debug_init(VXGE_TRACE,
			"%s: Generic receive offload disabled",
			vdev->ndev->name);

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	vxge_debug_init(VXGE_TRACE,
		"%s: Rx doorbell mode enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_init(VXGE_TRACE,
			"%s: Ring blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].ring.ring_blocks);
		vxge_debug_init(VXGE_TRACE,
			"%s: Fifo blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.fifo_blocks);
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
	}
}
#ifdef CONFIG_PM
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}
/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}

#endif
/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *)pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *)pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	vxge_reset(vdev);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *)pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			netdev_err(netdev,
				   "Can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}
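/* Map the adapter function mode onto the number of functions it exposes. */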
static inline u32 vxge_get_num_vfs(u64 function_mode)
{
	u32 num_functions = 0;

	switch (function_mode) {
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
		num_functions = 8;
		break;
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		num_functions = 1;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
	case VXGE_HW_FUNCTION_MODE_MRIOV:
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
		num_functions = 17;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
		num_functions = 4;
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
		num_functions = 2;
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
		num_functions = 8; /* TODO */
		break;
	}
	return num_functions;
}
/**
 * vxge_probe
 * @pdev: structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
 * Description:
 * This function is called when a new PCI device gets detected and initializes
 * it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status;
	int ret;
	int high_dma = 0;
	u64 vpath_mask = 0;
	struct vxgedev *vdev;
	struct vxge_config *ll_config = NULL;
	struct vxge_hw_device_config *device_config = NULL;
	struct vxge_hw_device_attr attr;
	int i, j, no_of_vpath = 0, max_vpath_supported = 0;
	u8 *macaddr;
	struct vxge_mac_addrs *entry;
	static int bus = -1, device = -1;
	u32 host_type;
	u32 function_mode;
	u32 num_vfs = 0;
	int new_device = 0;
	enum vxge_hw_status is_privileged;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
	attr.pdev = pdev;

	/* In SRIOV-17 mode, functions of the same adapter
	 * can be deployed on different buses */
	if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
		(device != PCI_SLOT(pdev->devfn))))
		new_device = 1;

	bus = pdev->bus->number;
	device = PCI_SLOT(pdev->devfn);

	if (new_device) {
		if (driver_config->config_dev_cnt &&
		   (driver_config->config_dev_cnt !=
			driver_config->total_dev_cnt))
			vxge_debug_init(VXGE_ERR,
				"%s: Configured %d of %d devices",
				VXGE_DRIVER_NAME,
				driver_config->config_dev_cnt,
				driver_config->total_dev_cnt);
		driver_config->config_dev_cnt = 0;
		driver_config->total_dev_cnt = 0;
	}

	/* Now making the CPU based no of vpath calculation
	 * applicable for individual functions as well.
	 */
	driver_config->g_no_cpus = 0;
	driver_config->vpath_per_dev = max_config_vpath;

	driver_config->total_dev_cnt++;
	if (++driver_config->config_dev_cnt > max_config_dev) {
		ret = 0;
		goto _exit0;
	}

	device_config = kzalloc(sizeof(struct vxge_hw_device_config),
		GFP_KERNEL);
	if (!device_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}

	ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
	if (!ll_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"ll_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}
	ll_config->tx_steering_type = TX_MULTIQ_STEERING;
	ll_config->intr_type = MSI_X;
	ll_config->napi_weight = NEW_NAPI_WEIGHT;
	ll_config->rth_steering = RTH_STEERING;

	/* get the default configuration parameters */
	vxge_hw_device_config_default_get(device_config);

	/* initialize configuration parameters */
	vxge_device_config_init(device_config, &ll_config->intr_type);

	ret = pci_enable_device(pdev);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : can not enable PCI device", __func__);
		goto _exit0;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 64bit DMA", __func__);

		high_dma = 1;

		if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(64))) {
			vxge_debug_init(VXGE_ERR,
				"%s : unable to obtain 64bit DMA for "
				"consistent allocations", __func__);
			ret = -ENOMEM;
			goto _exit1;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 32bit DMA", __func__);
	} else {
		ret = -ENOMEM;
		goto _exit1;
	}

	if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
		vxge_debug_init(VXGE_ERR,
			"%s : request regions failed", __func__);
		ret = -ENODEV;
		goto _exit1;
	}

	pci_set_master(pdev);

	attr.bar0 = pci_ioremap_bar(pdev, 0);
	if (!attr.bar0) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot remap io memory bar0", __func__);
		ret = -ENODEV;
		goto _exit2;
	}
	vxge_debug_ll_config(VXGE_TRACE,
		"pci ioremap bar0: %p:0x%llx",
		attr.bar0,
		(unsigned long long)pci_resource_start(pdev, 0));

	status = vxge_hw_device_hw_info_get(attr.bar0,
			&ll_config->device_hw_info);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: Reading of hardware info failed. "
			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	if (ll_config->device_hw_info.fw_version.major !=
		VXGE_DRIVER_FW_VERSION_MAJOR) {
		vxge_debug_init(VXGE_ERR,
			"%s: Incorrect firmware version. "
			"Please upgrade the firmware to version 1.x.x",
			VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vpath_mask = ll_config->device_hw_info.vpath_mask;
	if (vpath_mask == 0) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vxge_debug_ll_config(VXGE_TRACE,
		"%s:%d  Vpath mask = %llx", __func__, __LINE__,
		(unsigned long long)vpath_mask);

	function_mode = ll_config->device_hw_info.function_mode;
	host_type = ll_config->device_hw_info.host_type;
	is_privileged = __vxge_hw_device_is_privilaged(host_type,
		ll_config->device_hw_info.func_id);

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		max_vpath_supported++;
	}

	if (new_device)
		num_vfs = vxge_get_num_vfs(function_mode) - 1;

	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
	if (is_sriov(function_mode) && (max_config_dev > 1) &&
		(ll_config->intr_type != INTA) &&
		(is_privileged == VXGE_HW_OK)) {
		ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
			? (max_config_dev - 1) : num_vfs);
		if (ret)
			vxge_debug_ll_config(VXGE_ERR,
				"Failed in enabling SRIOV mode: %d\n", ret);
	}

	/*
	 * Configure vpaths and get driver configured number of vpaths
	 * which is less than or equal to the maximum vpaths per function.
	 */
	no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
	if (!no_of_vpath) {
		vxge_debug_ll_config(VXGE_ERR,
			"%s: No more vpaths to configure", VXGE_DRIVER_NAME);
		ret = 0;
		goto _exit3;
	}
	/* Setting driver callbacks */
	attr.uld_callbacks.link_up = vxge_callback_link_up;
	attr.uld_callbacks.link_down = vxge_callback_link_down;
	attr.uld_callbacks.crit_err = vxge_callback_crit_err;

	status = vxge_hw_device_initialize(&hldev, &attr, device_config);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"Failed to initialize device (%d)", status);
		ret = -EINVAL;
		goto _exit3;
	}

	/* if FCS stripping is not disabled in MAC fail driver load */
	if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: FCS stripping is not disabled in MAC, "
			"failing driver load", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit4;
	}

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);

	/* set private device info */
	pci_set_drvdata(pdev, hldev);

	ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
	ll_config->addr_learn_en = addr_learn_en;
	ll_config->rth_algorithm = RTH_ALG_JENKINS;
	ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
	ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
	ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
	ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
	if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
		&vdev)) {
		ret = -EINVAL;
		goto _exit4;
	}

	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	/* set private HW device info */
	hldev->ndev = vdev->ndev;
	vdev->mtu = VXGE_HW_DEFAULT_MTU;
	vdev->bar0 = attr.bar0;
	vdev->max_vpath_supported = max_vpath_supported;
	vdev->no_of_vpath = no_of_vpath;

	/* Virtual Path count */
	for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		if (j >= vdev->no_of_vpath)
			break;

		vdev->vpaths[j].is_configured = 1;
		vdev->vpaths[j].device_id = i;
		vdev->vpaths[j].ring.driver_id = j;
		vdev->vpaths[j].vdev = vdev;
		vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
		memcpy((u8 *)vdev->vpaths[j].macaddr,
				ll_config->device_hw_info.mac_addrs[i],
				ETH_ALEN);

		/* Initialize the mac address list header */
		INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);

		vdev->vpaths[j].mac_addr_cnt = 0;
		vdev->vpaths[j].mcast_addr_cnt = 0;
		j++;
	}
	vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
	vdev->max_config_port = max_config_port;

	vdev->vlan_tag_strip = vlan_tag_strip;

	/* map the hashing selector table to the configured vpaths */
	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpath_selector[i] = vpath_selector[i];

	macaddr = (u8 *)vdev->vpaths[0].macaddr;

	ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
	ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';

	vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
		vdev->ndev->name, ll_config->device_hw_info.serial_number);

	vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
		vdev->ndev->name, ll_config->device_hw_info.part_number);

	vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
		vdev->ndev->name, ll_config->device_hw_info.product_desc);

	vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
		vdev->ndev->name, macaddr);

	vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
		vdev->ndev->name, vxge_hw_device_link_width_get(hldev));

	vxge_debug_init(VXGE_TRACE,
		"%s: Firmware version : %s Date : %s", vdev->ndev->name,
		ll_config->device_hw_info.fw_version.version,
		ll_config->device_hw_info.fw_date.date);

	if (new_device) {
		switch (ll_config->device_hw_info.function_mode) {
		case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
			vxge_debug_init(VXGE_TRACE,
			"%s: Single Function Mode Enabled", vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
			vxge_debug_init(VXGE_TRACE,
			"%s: Multi Function Mode Enabled", vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_SRIOV:
			vxge_debug_init(VXGE_TRACE,
			"%s: Single Root IOV Mode Enabled", vdev->ndev->name);
			break;
		case VXGE_HW_FUNCTION_MODE_MRIOV:
			vxge_debug_init(VXGE_TRACE,
			"%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
			break;
		}
	}

	vxge_print_parm(vdev, vpath_mask);

	/* Store the fw version for ethtool option */
	strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);

	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = (struct vxge_mac_addrs *)
				kzalloc(sizeof(struct vxge_mac_addrs),
					GFP_KERNEL);
		if (NULL == entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -EPERM;
			goto _exit5;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}
4297 kfree(device_config
);
	/*
	 * INTA is shared in multi-function mode. This is unlike the INTA
	 * implementation in MR mode, where each VH has its own INTA message.
	 * - INTA is masked (disabled) as long as at least one function sets
	 * its TITAN_MASK_ALL_INT.ALARM bit.
	 * - INTA is unmasked (enabled) when all enabled functions have cleared
	 * their own TITAN_MASK_ALL_INT.ALARM bit.
	 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
	 * Though this driver leaves the top level interrupts unmasked while
	 * leaving the required module interrupt bits masked on exit, there
	 * could be a rogue driver around that does not follow this procedure,
	 * resulting in a failure to generate interrupts. The following code is
	 * present to prevent such a failure.
	 */
	if (ll_config->device_hw_info.function_mode ==
		VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
		if (vdev->config.intr_type == INTA)
			vxge_hw_device_unmask_all(hldev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	kfree(ll_config);
	return 0;
_exit6:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
_exit5:
	pci_disable_sriov(pdev);
	vxge_hw_device_terminate(hldev);
_exit4:
	iounmap(attr.bar0);
_exit3:
	pci_release_regions(pdev);
_exit2:
	pci_disable_device(pdev);
_exit1:
	kfree(ll_config);
	kfree(device_config);
	driver_config->config_dev_cnt--;
	pci_set_drvdata(pdev, NULL);
	return ret;
}
/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit
vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev = NULL;
	struct net_device *dev;
	int i = 0;
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	hldev = (struct __vxge_hw_device *)pci_get_drvdata(pdev);

	if (hldev == NULL)
		return;

	dev = hldev->ndev;
	vdev = netdev_priv(dev);

#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s:%d", __func__, __LINE__);

	vxge_debug_init(level_trace,
		"%s : removing PCI device...", __func__);
	vxge_device_unregister(hldev);
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_free_mac_add_list(&vdev->vpaths[i]);
		vdev->vpaths[i].mcast_addr_cnt = 0;
		vdev->vpaths[i].mac_addr_cnt = 0;
	}

	kfree(vdev->vpaths);

	iounmap(vdev->bar0);

	pci_disable_sriov(pdev);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace,
		"%s:%d Device unregistered", __func__, __LINE__);

	vxge_hw_device_terminate(hldev);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	vxge_debug_entryexit(level_trace,
		"%s:%d Exiting...", __func__, __LINE__);
}
static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};
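
/*
 * Glue between the PCI core and this driver: probe/remove run for each
 * device matching vxge_id_table, and the power-management hooks handle
 * suspend/resume when CONFIG_PM is set.
 */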
static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};
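
/*
 * Module entry point: allocate the global driver_config bookkeeping
 * structure and register with the PCI core; vxge_probe then runs for
 * each adapter found. The device-count warning flags the case where
 * only some of the discovered functions could be configured.
 */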
4437 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4438 pr_info("Driver version: %s\n", DRV_VERSION
);
4442 driver_config
= kzalloc(sizeof(struct vxge_drv_config
), GFP_KERNEL
);
4446 ret
= pci_register_driver(&vxge_driver
);
4448 if (driver_config
->config_dev_cnt
&&
4449 (driver_config
->config_dev_cnt
!= driver_config
->total_dev_cnt
))
4450 vxge_debug_init(VXGE_ERR
,
4451 "%s: Configured %d of %d devices",
4452 VXGE_DRIVER_NAME
, driver_config
->config_dev_cnt
,
4453 driver_config
->total_dev_cnt
);
4456 kfree(driver_config
);
4464 pci_unregister_driver(&vxge_driver
);
4465 kfree(driver_config
);
4467 module_init(vxge_starter
);
4468 module_exit(vxge_closer
);