/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
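
/* ethtool stats: each enic_stat entry below maps an ethtool string to a
 * u64 word offset (offsetof() / 8) within struct vnic_tx_stats or
 * struct vnic_rx_stats, so enic_get_ethtool_stats() can index the
 * firmware stats block directly.
 */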
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}
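
/* Note: a "dynamic" vNIC (PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) is the
 * port-profile-provisioned personality; the MAC-address and probe paths
 * below branch on enic_is_dynamic() for that case.
 */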
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_fw_info(enic->vdev, &fw_info);
	spin_unlock(&enic->devcmd_lock);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}

static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}

static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}

static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
			INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
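
/* Coalescing note: the requested microsecond values above are clamped to
 * the adapter maximum (VNIC_INTR_TIMER_MAX); with INTx or MSI a single
 * coalescing timer serves both WQ and RQ, so tx and rx settings must match.
 */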
static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
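
/* The WQ completion path above re-wakes the TX queue only once at least
 * MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors are free again,
 * matching the stop threshold used in enic_hard_start_xmit().
 */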
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
				enic->netdev->name, i, error_status);
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
		netif_carrier_off(enic->netdev);
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
			enic->netdev->name, enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
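
/* Interrupt handling: the ISRs below dispatch notify events to
 * enic_notify_check(), queue errors to enic_log_q_error() plus a reset
 * work item, and WQ/RQ completions to NAPI polling.
 */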
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
			len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left));
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left));	/* EOP? */
			frag_len_left -= len;
			offset += len;
		}
	}
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
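
/* TX dispatch: enic_queue_wq_skb() selects the TSO, L4-checksum-offload
 * or plain/VLAN descriptor format per skb; only the TSO path may split a
 * single fragment across multiple WQ_ENET_MAX_DESC_LEN descriptors.
 */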
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &stats);
	spin_unlock(&enic->devcmd_lock);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
837 static int enic_dev_add_station_addr(struct enic
*enic
)
841 if (is_valid_ether_addr(enic
->netdev
->dev_addr
)) {
842 spin_lock(&enic
->devcmd_lock
);
843 err
= vnic_dev_add_addr(enic
->vdev
, enic
->netdev
->dev_addr
);
844 spin_unlock(&enic
->devcmd_lock
);
850 static int enic_dev_del_station_addr(struct enic
*enic
)
854 if (is_valid_ether_addr(enic
->netdev
->dev_addr
)) {
855 spin_lock(&enic
->devcmd_lock
);
856 err
= vnic_dev_del_addr(enic
->vdev
, enic
->netdev
->dev_addr
);
857 spin_unlock(&enic
->devcmd_lock
);
863 static int enic_set_mac_address_dynamic(struct net_device
*netdev
, void *p
)
865 struct enic
*enic
= netdev_priv(netdev
);
866 struct sockaddr
*saddr
= p
;
867 char *addr
= saddr
->sa_data
;
870 if (netif_running(enic
->netdev
)) {
871 err
= enic_dev_del_station_addr(enic
);
876 err
= enic_set_mac_addr(netdev
, addr
);
880 if (netif_running(enic
->netdev
)) {
881 err
= enic_dev_add_station_addr(enic
);
889 static int enic_set_mac_address(struct net_device
*netdev
, void *p
)
894 /* netif_tx_lock held, BHs disabled */
895 static void enic_set_multicast_list(struct net_device
*netdev
)
897 struct enic
*enic
= netdev_priv(netdev
);
898 struct netdev_hw_addr
*ha
;
900 int multicast
= (netdev
->flags
& IFF_MULTICAST
) ? 1 : 0;
901 int broadcast
= (netdev
->flags
& IFF_BROADCAST
) ? 1 : 0;
902 int promisc
= (netdev
->flags
& IFF_PROMISC
) ? 1 : 0;
903 unsigned int mc_count
= netdev_mc_count(netdev
);
904 int allmulti
= (netdev
->flags
& IFF_ALLMULTI
) ||
905 mc_count
> ENIC_MULTICAST_PERFECT_FILTERS
;
906 unsigned int flags
= netdev
->flags
| (allmulti
? IFF_ALLMULTI
: 0);
907 u8 mc_addr
[ENIC_MULTICAST_PERFECT_FILTERS
][ETH_ALEN
];
910 if (mc_count
> ENIC_MULTICAST_PERFECT_FILTERS
)
911 mc_count
= ENIC_MULTICAST_PERFECT_FILTERS
;
913 spin_lock(&enic
->devcmd_lock
);
915 if (enic
->flags
!= flags
) {
917 vnic_dev_packet_filter(enic
->vdev
, directed
,
918 multicast
, broadcast
, promisc
, allmulti
);
921 /* Is there an easier way? Trying to minimize to
922 * calls to add/del multicast addrs. We keep the
923 * addrs from the last call in enic->mc_addr and
924 * look for changes to add/del.
928 netdev_for_each_mc_addr(ha
, netdev
) {
931 memcpy(mc_addr
[i
++], ha
->addr
, ETH_ALEN
);
934 for (i
= 0; i
< enic
->mc_count
; i
++) {
935 for (j
= 0; j
< mc_count
; j
++)
936 if (compare_ether_addr(enic
->mc_addr
[i
],
940 enic_del_multicast_addr(enic
, enic
->mc_addr
[i
]);
943 for (i
= 0; i
< mc_count
; i
++) {
944 for (j
= 0; j
< enic
->mc_count
; j
++)
945 if (compare_ether_addr(mc_addr
[i
],
946 enic
->mc_addr
[j
]) == 0)
948 if (j
== enic
->mc_count
)
949 enic_add_multicast_addr(enic
, mc_addr
[i
]);
952 /* Save the list to compare against next time
955 for (i
= 0; i
< mc_count
; i
++)
956 memcpy(enic
->mc_addr
[i
], mc_addr
[i
], ETH_ALEN
);
958 enic
->mc_count
= mc_count
;
960 spin_unlock(&enic
->devcmd_lock
);
/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}

/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}
998 static int enic_vnic_dev_deinit(struct enic
*enic
)
1002 spin_lock(&enic
->devcmd_lock
);
1003 err
= vnic_dev_deinit(enic
->vdev
);
1004 spin_unlock(&enic
->devcmd_lock
);
1009 static int enic_dev_init_prov(struct enic
*enic
, struct vic_provinfo
*vp
)
1013 spin_lock(&enic
->devcmd_lock
);
1014 err
= vnic_dev_init_prov(enic
->vdev
,
1015 (u8
*)vp
, vic_provinfo_size(vp
));
1016 spin_unlock(&enic
->devcmd_lock
);
1021 static int enic_dev_init_done(struct enic
*enic
, int *done
, int *error
)
1025 spin_lock(&enic
->devcmd_lock
);
1026 err
= vnic_dev_init_done(enic
->vdev
, done
, error
);
1027 spin_unlock(&enic
->devcmd_lock
);
1032 static int enic_set_port_profile(struct enic
*enic
, u8
*mac
)
1034 struct vic_provinfo
*vp
;
1035 u8 oui
[3] = VIC_PROVINFO_CISCO_OUI
;
	static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
		"%02X%02X-%02X%02X%02X%02X%02X%02X";
1042 err
= enic_vnic_dev_deinit(enic
);
1046 switch (enic
->pp
.request
) {
1048 case PORT_REQUEST_ASSOCIATE
:
1050 if (!(enic
->pp
.set
& ENIC_SET_NAME
) || !strlen(enic
->pp
.name
))
1053 if (!is_valid_ether_addr(mac
))
1054 return -EADDRNOTAVAIL
;
1056 vp
= vic_provinfo_alloc(GFP_KERNEL
, oui
,
1057 VIC_PROVINFO_LINUX_TYPE
);
1061 vic_provinfo_add_tlv(vp
,
1062 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR
,
1063 strlen(enic
->pp
.name
) + 1, enic
->pp
.name
);
1065 vic_provinfo_add_tlv(vp
,
1066 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR
,
1069 if (enic
->pp
.set
& ENIC_SET_INSTANCE
) {
1070 uuid
= enic
->pp
.instance_uuid
;
1071 sprintf(uuid_str
, uuid_fmt
,
1072 uuid
[0], uuid
[1], uuid
[2], uuid
[3],
1073 uuid
[4], uuid
[5], uuid
[6], uuid
[7],
1074 uuid
[8], uuid
[9], uuid
[10], uuid
[11],
1075 uuid
[12], uuid
[13], uuid
[14], uuid
[15]);
1076 vic_provinfo_add_tlv(vp
,
1077 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR
,
1078 sizeof(uuid_str
), uuid_str
);
1081 if (enic
->pp
.set
& ENIC_SET_HOST
) {
1082 uuid
= enic
->pp
.host_uuid
;
1083 sprintf(uuid_str
, uuid_fmt
,
1084 uuid
[0], uuid
[1], uuid
[2], uuid
[3],
1085 uuid
[4], uuid
[5], uuid
[6], uuid
[7],
1086 uuid
[8], uuid
[9], uuid
[10], uuid
[11],
1087 uuid
[12], uuid
[13], uuid
[14], uuid
[15]);
1088 vic_provinfo_add_tlv(vp
,
1089 VIC_LINUX_PROV_TLV_HOST_UUID_STR
,
1090 sizeof(uuid_str
), uuid_str
);
1093 err
= enic_dev_init_prov(enic
, vp
);
1094 vic_provinfo_free(vp
);
1099 case PORT_REQUEST_DISASSOCIATE
:
1106 enic
->pp
.set
|= ENIC_SET_APPLIED
;
1110 static int enic_set_vf_port(struct net_device
*netdev
, int vf
,
1111 struct nlattr
*port
[])
1113 struct enic
*enic
= netdev_priv(netdev
);
1115 memset(&enic
->pp
, 0, sizeof(enic
->pp
));
1117 if (port
[IFLA_PORT_REQUEST
]) {
1118 enic
->pp
.set
|= ENIC_SET_REQUEST
;
1119 enic
->pp
.request
= nla_get_u8(port
[IFLA_PORT_REQUEST
]);
1122 if (port
[IFLA_PORT_PROFILE
]) {
1123 enic
->pp
.set
|= ENIC_SET_NAME
;
1124 memcpy(enic
->pp
.name
, nla_data(port
[IFLA_PORT_PROFILE
]),
1128 if (port
[IFLA_PORT_INSTANCE_UUID
]) {
1129 enic
->pp
.set
|= ENIC_SET_INSTANCE
;
1130 memcpy(enic
->pp
.instance_uuid
,
1131 nla_data(port
[IFLA_PORT_INSTANCE_UUID
]), PORT_UUID_MAX
);
1134 if (port
[IFLA_PORT_HOST_UUID
]) {
1135 enic
->pp
.set
|= ENIC_SET_HOST
;
1136 memcpy(enic
->pp
.host_uuid
,
1137 nla_data(port
[IFLA_PORT_HOST_UUID
]), PORT_UUID_MAX
);
1140 /* don't support VFs, yet */
1141 if (vf
!= PORT_SELF_VF
)
1144 if (!(enic
->pp
.set
& ENIC_SET_REQUEST
))
1147 if (enic
->pp
.request
== PORT_REQUEST_ASSOCIATE
) {
1149 /* If the interface mac addr hasn't been assigned,
1150 * assign a random mac addr before setting port-
1154 if (is_zero_ether_addr(netdev
->dev_addr
))
1155 random_ether_addr(netdev
->dev_addr
);
1158 return enic_set_port_profile(enic
, netdev
->dev_addr
);
1161 static int enic_get_vf_port(struct net_device
*netdev
, int vf
,
1162 struct sk_buff
*skb
)
1164 struct enic
*enic
= netdev_priv(netdev
);
1165 int err
, error
, done
;
1166 u16 response
= PORT_PROFILE_RESPONSE_SUCCESS
;
1168 if (!(enic
->pp
.set
& ENIC_SET_APPLIED
))
1171 err
= enic_dev_init_done(enic
, &done
, &error
);
1178 response
= PORT_PROFILE_RESPONSE_INPROGRESS
;
1181 response
= PORT_PROFILE_RESPONSE_INVALID
;
1184 response
= PORT_PROFILE_RESPONSE_BADSTATE
;
1187 response
= PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES
;
1190 response
= PORT_PROFILE_RESPONSE_ERROR
;
1194 NLA_PUT_U16(skb
, IFLA_PORT_REQUEST
, enic
->pp
.request
);
1195 NLA_PUT_U16(skb
, IFLA_PORT_RESPONSE
, response
);
1196 if (enic
->pp
.set
& ENIC_SET_NAME
)
1197 NLA_PUT(skb
, IFLA_PORT_PROFILE
, PORT_PROFILE_MAX
,
1199 if (enic
->pp
.set
& ENIC_SET_INSTANCE
)
1200 NLA_PUT(skb
, IFLA_PORT_INSTANCE_UUID
, PORT_UUID_MAX
,
1201 enic
->pp
.instance_uuid
);
1202 if (enic
->pp
.set
& ENIC_SET_HOST
)
1203 NLA_PUT(skb
, IFLA_PORT_HOST_UUID
, PORT_UUID_MAX
,
1204 enic
->pp
.host_uuid
);
1212 static void enic_free_rq_buf(struct vnic_rq
*rq
, struct vnic_rq_buf
*buf
)
1214 struct enic
*enic
= vnic_dev_priv(rq
->vdev
);
1219 pci_unmap_single(enic
->pdev
, buf
->dma_addr
,
1220 buf
->len
, PCI_DMA_FROMDEVICE
);
1221 dev_kfree_skb_any(buf
->os_buf
);
1224 static int enic_rq_alloc_buf(struct vnic_rq
*rq
)
1226 struct enic
*enic
= vnic_dev_priv(rq
->vdev
);
1227 struct net_device
*netdev
= enic
->netdev
;
1228 struct sk_buff
*skb
;
1229 unsigned int len
= netdev
->mtu
+ ETH_HLEN
;
1230 unsigned int os_buf_index
= 0;
1231 dma_addr_t dma_addr
;
1233 skb
= netdev_alloc_skb_ip_align(netdev
, len
);
1237 dma_addr
= pci_map_single(enic
->pdev
, skb
->data
,
1238 len
, PCI_DMA_FROMDEVICE
);
1240 enic_queue_rq_desc(rq
, skb
, os_buf_index
,
1246 static int enic_rq_alloc_buf_a1(struct vnic_rq
*rq
)
1248 struct rq_enet_desc
*desc
= vnic_rq_next_desc(rq
);
1250 if (vnic_rq_posting_soon(rq
)) {
1252 /* SW workaround for A0 HW erratum: if we're just about
1253 * to write posted_index, insert a dummy desc
1257 rq_enet_desc_enc(desc
, 0, RQ_ENET_TYPE_RESV2
, 0);
1258 vnic_rq_post(rq
, 0, 0, 0, 0);
1260 return enic_rq_alloc_buf(rq
);
1266 static int enic_set_rq_alloc_buf(struct enic
*enic
)
1268 enum vnic_dev_hw_version hw_ver
;
1271 err
= vnic_dev_hw_version(enic
->vdev
, &hw_ver
);
1276 case VNIC_DEV_HW_VER_A1
:
1277 enic
->rq_alloc_buf
= enic_rq_alloc_buf_a1
;
1279 case VNIC_DEV_HW_VER_A2
:
1280 case VNIC_DEV_HW_VER_UNKNOWN
:
1281 enic
->rq_alloc_buf
= enic_rq_alloc_buf
;
1290 static void enic_rq_indicate_buf(struct vnic_rq
*rq
,
1291 struct cq_desc
*cq_desc
, struct vnic_rq_buf
*buf
,
1292 int skipped
, void *opaque
)
1294 struct enic
*enic
= vnic_dev_priv(rq
->vdev
);
1295 struct net_device
*netdev
= enic
->netdev
;
1296 struct sk_buff
*skb
;
1298 u8 type
, color
, eop
, sop
, ingress_port
, vlan_stripped
;
1299 u8 fcoe
, fcoe_sof
, fcoe_fc_crc_ok
, fcoe_enc_error
, fcoe_eof
;
1300 u8 tcp_udp_csum_ok
, udp
, tcp
, ipv4_csum_ok
;
1301 u8 ipv6
, ipv4
, ipv4_fragment
, fcs_ok
, rss_type
, csum_not_calc
;
1303 u16 q_number
, completed_index
, bytes_written
, vlan
, checksum
;
1310 prefetch(skb
->data
- NET_IP_ALIGN
);
1311 pci_unmap_single(enic
->pdev
, buf
->dma_addr
,
1312 buf
->len
, PCI_DMA_FROMDEVICE
);
1314 cq_enet_rq_desc_dec((struct cq_enet_rq_desc
*)cq_desc
,
1315 &type
, &color
, &q_number
, &completed_index
,
1316 &ingress_port
, &fcoe
, &eop
, &sop
, &rss_type
,
1317 &csum_not_calc
, &rss_hash
, &bytes_written
,
1318 &packet_error
, &vlan_stripped
, &vlan
, &checksum
,
1319 &fcoe_sof
, &fcoe_fc_crc_ok
, &fcoe_enc_error
,
1320 &fcoe_eof
, &tcp_udp_csum_ok
, &udp
, &tcp
,
1321 &ipv4_csum_ok
, &ipv6
, &ipv4
, &ipv4_fragment
,
1327 if (bytes_written
> 0)
1329 else if (bytes_written
== 0)
1330 enic
->rq_truncated_pkts
++;
1333 dev_kfree_skb_any(skb
);
1338 if (eop
&& bytes_written
> 0) {
1343 skb_put(skb
, bytes_written
);
1344 skb
->protocol
= eth_type_trans(skb
, netdev
);
1346 if (enic
->csum_rx_enabled
&& !csum_not_calc
) {
1347 skb
->csum
= htons(checksum
);
1348 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1353 if (enic
->vlan_group
&& vlan_stripped
) {
1355 if (netdev
->features
& NETIF_F_GRO
)
1356 vlan_gro_receive(&enic
->napi
, enic
->vlan_group
,
1359 vlan_hwaccel_receive_skb(skb
,
1360 enic
->vlan_group
, vlan
);
1364 if (netdev
->features
& NETIF_F_GRO
)
1365 napi_gro_receive(&enic
->napi
, skb
);
1367 netif_receive_skb(skb
);
1376 dev_kfree_skb_any(skb
);
1380 static int enic_rq_service(struct vnic_dev
*vdev
, struct cq_desc
*cq_desc
,
1381 u8 type
, u16 q_number
, u16 completed_index
, void *opaque
)
1383 struct enic
*enic
= vnic_dev_priv(vdev
);
1385 vnic_rq_service(&enic
->rq
[q_number
], cq_desc
,
1386 completed_index
, VNIC_RQ_RETURN_DESC
,
1387 enic_rq_indicate_buf
, opaque
);
1392 static int enic_poll(struct napi_struct
*napi
, int budget
)
1394 struct enic
*enic
= container_of(napi
, struct enic
, napi
);
1395 unsigned int rq_work_to_do
= budget
;
1396 unsigned int wq_work_to_do
= -1; /* no limit */
1397 unsigned int work_done
, rq_work_done
, wq_work_done
;
1400 /* Service RQ (first) and WQ
1403 rq_work_done
= vnic_cq_service(&enic
->cq
[ENIC_CQ_RQ
],
1404 rq_work_to_do
, enic_rq_service
, NULL
);
1406 wq_work_done
= vnic_cq_service(&enic
->cq
[ENIC_CQ_WQ
],
1407 wq_work_to_do
, enic_wq_service
, NULL
);
1409 /* Accumulate intr event credits for this polling
1410 * cycle. An intr event is the completion of a
1411 * a WQ or RQ packet.
1414 work_done
= rq_work_done
+ wq_work_done
;
1417 vnic_intr_return_credits(&enic
->intr
[ENIC_INTX_WQ_RQ
],
1419 0 /* don't unmask intr */,
1420 0 /* don't reset intr timer */);
1422 err
= vnic_rq_fill(&enic
->rq
[0], enic
->rq_alloc_buf
);
1424 /* Buffer allocation failed. Stay in polling
1425 * mode so we can try to fill the ring again.
1429 rq_work_done
= rq_work_to_do
;
1431 if (rq_work_done
< rq_work_to_do
) {
1433 /* Some work done, but not enough to stay in polling,
1437 napi_complete(napi
);
1438 vnic_intr_unmask(&enic
->intr
[ENIC_INTX_WQ_RQ
]);
1441 return rq_work_done
;
1444 static int enic_poll_msix(struct napi_struct
*napi
, int budget
)
1446 struct enic
*enic
= container_of(napi
, struct enic
, napi
);
1447 unsigned int work_to_do
= budget
;
1448 unsigned int work_done
;
1454 work_done
= vnic_cq_service(&enic
->cq
[ENIC_CQ_RQ
],
1455 work_to_do
, enic_rq_service
, NULL
);
1457 /* Return intr event credits for this polling
1458 * cycle. An intr event is the completion of a
1463 vnic_intr_return_credits(&enic
->intr
[ENIC_MSIX_RQ
],
1465 0 /* don't unmask intr */,
1466 0 /* don't reset intr timer */);
1468 err
= vnic_rq_fill(&enic
->rq
[0], enic
->rq_alloc_buf
);
1470 /* Buffer allocation failed. Stay in polling mode
1471 * so we can try to fill the ring again.
1475 work_done
= work_to_do
;
1477 if (work_done
< work_to_do
) {
1479 /* Some work done, but not enough to stay in polling,
1483 napi_complete(napi
);
1484 vnic_intr_unmask(&enic
->intr
[ENIC_MSIX_RQ
]);
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
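
/* With MSI the notify area has no dedicated interrupt vector, so this
 * timer is re-armed every ENIC_NOTIFY_TIMER_PERIOD to poll it (see
 * enic_notify_timer_start()).
 */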
1500 static void enic_free_intr(struct enic
*enic
)
1502 struct net_device
*netdev
= enic
->netdev
;
1505 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
1506 case VNIC_DEV_INTR_MODE_INTX
:
1507 free_irq(enic
->pdev
->irq
, netdev
);
1509 case VNIC_DEV_INTR_MODE_MSI
:
1510 free_irq(enic
->pdev
->irq
, enic
);
1512 case VNIC_DEV_INTR_MODE_MSIX
:
1513 for (i
= 0; i
< ARRAY_SIZE(enic
->msix
); i
++)
1514 if (enic
->msix
[i
].requested
)
1515 free_irq(enic
->msix_entry
[i
].vector
,
1516 enic
->msix
[i
].devid
);
1523 static int enic_request_intr(struct enic
*enic
)
1525 struct net_device
*netdev
= enic
->netdev
;
1529 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
1531 case VNIC_DEV_INTR_MODE_INTX
:
1533 err
= request_irq(enic
->pdev
->irq
, enic_isr_legacy
,
1534 IRQF_SHARED
, netdev
->name
, netdev
);
1537 case VNIC_DEV_INTR_MODE_MSI
:
1539 err
= request_irq(enic
->pdev
->irq
, enic_isr_msi
,
1540 0, netdev
->name
, enic
);
1543 case VNIC_DEV_INTR_MODE_MSIX
:
1545 sprintf(enic
->msix
[ENIC_MSIX_RQ
].devname
,
1546 "%.11s-rx-0", netdev
->name
);
1547 enic
->msix
[ENIC_MSIX_RQ
].isr
= enic_isr_msix_rq
;
1548 enic
->msix
[ENIC_MSIX_RQ
].devid
= enic
;
1550 sprintf(enic
->msix
[ENIC_MSIX_WQ
].devname
,
1551 "%.11s-tx-0", netdev
->name
);
1552 enic
->msix
[ENIC_MSIX_WQ
].isr
= enic_isr_msix_wq
;
1553 enic
->msix
[ENIC_MSIX_WQ
].devid
= enic
;
1555 sprintf(enic
->msix
[ENIC_MSIX_ERR
].devname
,
1556 "%.11s-err", netdev
->name
);
1557 enic
->msix
[ENIC_MSIX_ERR
].isr
= enic_isr_msix_err
;
1558 enic
->msix
[ENIC_MSIX_ERR
].devid
= enic
;
1560 sprintf(enic
->msix
[ENIC_MSIX_NOTIFY
].devname
,
1561 "%.11s-notify", netdev
->name
);
1562 enic
->msix
[ENIC_MSIX_NOTIFY
].isr
= enic_isr_msix_notify
;
1563 enic
->msix
[ENIC_MSIX_NOTIFY
].devid
= enic
;
1565 for (i
= 0; i
< ARRAY_SIZE(enic
->msix
); i
++) {
1566 err
= request_irq(enic
->msix_entry
[i
].vector
,
1567 enic
->msix
[i
].isr
, 0,
1568 enic
->msix
[i
].devname
,
1569 enic
->msix
[i
].devid
);
1571 enic_free_intr(enic
);
1574 enic
->msix
[i
].requested
= 1;
1586 static void enic_synchronize_irqs(struct enic
*enic
)
1590 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
1591 case VNIC_DEV_INTR_MODE_INTX
:
1592 case VNIC_DEV_INTR_MODE_MSI
:
1593 synchronize_irq(enic
->pdev
->irq
);
1595 case VNIC_DEV_INTR_MODE_MSIX
:
1596 for (i
= 0; i
< enic
->intr_count
; i
++)
1597 synchronize_irq(enic
->msix_entry
[i
].vector
);
1604 static int enic_notify_set(struct enic
*enic
)
1608 spin_lock(&enic
->devcmd_lock
);
1609 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
1610 case VNIC_DEV_INTR_MODE_INTX
:
1611 err
= vnic_dev_notify_set(enic
->vdev
, ENIC_INTX_NOTIFY
);
1613 case VNIC_DEV_INTR_MODE_MSIX
:
1614 err
= vnic_dev_notify_set(enic
->vdev
, ENIC_MSIX_NOTIFY
);
1617 err
= vnic_dev_notify_set(enic
->vdev
, -1 /* no intr */);
1620 spin_unlock(&enic
->devcmd_lock
);
1625 static void enic_notify_timer_start(struct enic
*enic
)
1627 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
1628 case VNIC_DEV_INTR_MODE_MSI
:
1629 mod_timer(&enic
->notify_timer
, jiffies
);
1632 /* Using intr for notification for INTx/MSI-X */
1637 /* rtnl lock is held, process context */
1638 static int enic_open(struct net_device
*netdev
)
1640 struct enic
*enic
= netdev_priv(netdev
);
1644 err
= enic_request_intr(enic
);
1646 printk(KERN_ERR PFX
"%s: Unable to request irq.\n",
1651 err
= enic_notify_set(enic
);
1654 "%s: Failed to alloc notify buffer, aborting.\n",
1656 goto err_out_free_intr
;
1659 for (i
= 0; i
< enic
->rq_count
; i
++) {
1660 vnic_rq_fill(&enic
->rq
[i
], enic
->rq_alloc_buf
);
1661 /* Need at least one buffer on ring to get going */
1662 if (vnic_rq_desc_used(&enic
->rq
[i
]) == 0) {
1664 "%s: Unable to alloc receive buffers.\n",
1667 goto err_out_notify_unset
;
1671 for (i
= 0; i
< enic
->wq_count
; i
++)
1672 vnic_wq_enable(&enic
->wq
[i
]);
1673 for (i
= 0; i
< enic
->rq_count
; i
++)
1674 vnic_rq_enable(&enic
->rq
[i
]);
1676 enic_dev_add_station_addr(enic
);
1677 enic_set_multicast_list(netdev
);
1679 netif_wake_queue(netdev
);
1680 napi_enable(&enic
->napi
);
1681 spin_lock(&enic
->devcmd_lock
);
1682 vnic_dev_enable(enic
->vdev
);
1683 spin_unlock(&enic
->devcmd_lock
);
1685 for (i
= 0; i
< enic
->intr_count
; i
++)
1686 vnic_intr_unmask(&enic
->intr
[i
]);
1688 enic_notify_timer_start(enic
);
1692 err_out_notify_unset
:
1693 spin_lock(&enic
->devcmd_lock
);
1694 vnic_dev_notify_unset(enic
->vdev
);
1695 spin_unlock(&enic
->devcmd_lock
);
1697 enic_free_intr(enic
);
1702 /* rtnl lock is held, process context */
1703 static int enic_stop(struct net_device
*netdev
)
1705 struct enic
*enic
= netdev_priv(netdev
);
1709 for (i
= 0; i
< enic
->intr_count
; i
++)
1710 vnic_intr_mask(&enic
->intr
[i
]);
1712 enic_synchronize_irqs(enic
);
1714 del_timer_sync(&enic
->notify_timer
);
1716 spin_lock(&enic
->devcmd_lock
);
1717 vnic_dev_disable(enic
->vdev
);
1718 spin_unlock(&enic
->devcmd_lock
);
1719 napi_disable(&enic
->napi
);
1720 netif_carrier_off(netdev
);
1721 netif_tx_disable(netdev
);
1723 enic_dev_del_station_addr(enic
);
1725 for (i
= 0; i
< enic
->wq_count
; i
++) {
1726 err
= vnic_wq_disable(&enic
->wq
[i
]);
1730 for (i
= 0; i
< enic
->rq_count
; i
++) {
1731 err
= vnic_rq_disable(&enic
->rq
[i
]);
1736 spin_lock(&enic
->devcmd_lock
);
1737 vnic_dev_notify_unset(enic
->vdev
);
1738 spin_unlock(&enic
->devcmd_lock
);
1739 enic_free_intr(enic
);
1741 for (i
= 0; i
< enic
->wq_count
; i
++)
1742 vnic_wq_clean(&enic
->wq
[i
], enic_free_wq_buf
);
1743 for (i
= 0; i
< enic
->rq_count
; i
++)
1744 vnic_rq_clean(&enic
->rq
[i
], enic_free_rq_buf
);
1745 for (i
= 0; i
< enic
->cq_count
; i
++)
1746 vnic_cq_clean(&enic
->cq
[i
]);
1747 for (i
= 0; i
< enic
->intr_count
; i
++)
1748 vnic_intr_clean(&enic
->intr
[i
]);
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		printk(KERN_WARNING PFX
			"%s: interface MTU (%d) set higher "
			"than port MTU (%d)\n",
			netdev->name, netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
1778 #ifdef CONFIG_NET_POLL_CONTROLLER
1779 static void enic_poll_controller(struct net_device
*netdev
)
1781 struct enic
*enic
= netdev_priv(netdev
);
1782 struct vnic_dev
*vdev
= enic
->vdev
;
1784 switch (vnic_dev_get_intr_mode(vdev
)) {
1785 case VNIC_DEV_INTR_MODE_MSIX
:
1786 enic_isr_msix_rq(enic
->pdev
->irq
, enic
);
1787 enic_isr_msix_wq(enic
->pdev
->irq
, enic
);
1789 case VNIC_DEV_INTR_MODE_MSI
:
1790 enic_isr_msi(enic
->pdev
->irq
, enic
);
1792 case VNIC_DEV_INTR_MODE_INTX
:
1793 enic_isr_legacy(enic
->pdev
->irq
, netdev
);
1801 static int enic_dev_wait(struct vnic_dev
*vdev
,
1802 int (*start
)(struct vnic_dev
*, int),
1803 int (*finished
)(struct vnic_dev
*, int *),
1810 BUG_ON(in_interrupt());
1812 err
= start(vdev
, arg
);
1816 /* Wait for func to complete...2 seconds max
1819 time
= jiffies
+ (HZ
* 2);
1822 err
= finished(vdev
, &done
);
1829 schedule_timeout_uninterruptible(HZ
/ 10);
1831 } while (time_after(time
, jiffies
));
1836 static int enic_dev_open(struct enic
*enic
)
1840 err
= enic_dev_wait(enic
->vdev
, vnic_dev_open
,
1841 vnic_dev_open_done
, 0);
1844 "vNIC device open failed, err %d.\n", err
);
1849 static int enic_dev_soft_reset(struct enic
*enic
)
1853 err
= enic_dev_wait(enic
->vdev
, vnic_dev_soft_reset
,
1854 vnic_dev_soft_reset_done
, 0);
1857 "vNIC soft reset failed, err %d.\n", err
);
1862 static int enic_set_niccfg(struct enic
*enic
)
1864 const u8 rss_default_cpu
= 0;
1865 const u8 rss_hash_type
= 0;
1866 const u8 rss_hash_bits
= 0;
1867 const u8 rss_base_cpu
= 0;
1868 const u8 rss_enable
= 0;
1869 const u8 tso_ipid_split_en
= 0;
1870 const u8 ig_vlan_strip_en
= 1;
1872 /* Enable VLAN tag stripping. RSS not enabled (yet).
1875 return enic_set_nic_cfg(enic
,
1876 rss_default_cpu
, rss_hash_type
,
1877 rss_hash_bits
, rss_base_cpu
,
1878 rss_enable
, tso_ipid_split_en
,
1882 static void enic_reset(struct work_struct
*work
)
1884 struct enic
*enic
= container_of(work
, struct enic
, reset
);
1886 if (!netif_running(enic
->netdev
))
1891 spin_lock(&enic
->devcmd_lock
);
1892 vnic_dev_hang_notify(enic
->vdev
);
1893 spin_unlock(&enic
->devcmd_lock
);
1895 enic_stop(enic
->netdev
);
1896 enic_dev_soft_reset(enic
);
1897 vnic_dev_init(enic
->vdev
, 0);
1898 enic_reset_mcaddrs(enic
);
1899 enic_init_vnic_resources(enic
);
1900 enic_set_niccfg(enic
);
1901 enic_open(enic
->netdev
);
1906 static int enic_set_intr_mode(struct enic
*enic
)
1912 /* Set interrupt mode (INTx, MSI, MSI-X) depending
1913 * system capabilities.
1917 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
1918 * (the second to last INTR is used for WQ/RQ errors)
1919 * (the last INTR is used for notifications)
1922 BUG_ON(ARRAY_SIZE(enic
->msix_entry
) < n
+ m
+ 2);
1923 for (i
= 0; i
< n
+ m
+ 2; i
++)
1924 enic
->msix_entry
[i
].entry
= i
;
1926 if (enic
->config
.intr_mode
< 1 &&
1927 enic
->rq_count
>= n
&&
1928 enic
->wq_count
>= m
&&
1929 enic
->cq_count
>= n
+ m
&&
1930 enic
->intr_count
>= n
+ m
+ 2 &&
1931 !pci_enable_msix(enic
->pdev
, enic
->msix_entry
, n
+ m
+ 2)) {
1935 enic
->cq_count
= n
+ m
;
1936 enic
->intr_count
= n
+ m
+ 2;
1938 vnic_dev_set_intr_mode(enic
->vdev
, VNIC_DEV_INTR_MODE_MSIX
);
1945 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
1948 if (enic
->config
.intr_mode
< 2 &&
1949 enic
->rq_count
>= 1 &&
1950 enic
->wq_count
>= 1 &&
1951 enic
->cq_count
>= 2 &&
1952 enic
->intr_count
>= 1 &&
1953 !pci_enable_msi(enic
->pdev
)) {
1958 enic
->intr_count
= 1;
1960 vnic_dev_set_intr_mode(enic
->vdev
, VNIC_DEV_INTR_MODE_MSI
);
1967 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
1968 * (the first INTR is used for WQ/RQ)
1969 * (the second INTR is used for WQ/RQ errors)
1970 * (the last INTR is used for notifications)
1973 if (enic
->config
.intr_mode
< 3 &&
1974 enic
->rq_count
>= 1 &&
1975 enic
->wq_count
>= 1 &&
1976 enic
->cq_count
>= 2 &&
1977 enic
->intr_count
>= 3) {
1982 enic
->intr_count
= 3;
1984 vnic_dev_set_intr_mode(enic
->vdev
, VNIC_DEV_INTR_MODE_INTX
);
1989 vnic_dev_set_intr_mode(enic
->vdev
, VNIC_DEV_INTR_MODE_UNKNOWN
);
1994 static void enic_clear_intr_mode(struct enic
*enic
)
1996 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
1997 case VNIC_DEV_INTR_MODE_MSIX
:
1998 pci_disable_msix(enic
->pdev
);
2000 case VNIC_DEV_INTR_MODE_MSI
:
2001 pci_disable_msi(enic
->pdev
);
2007 vnic_dev_set_intr_mode(enic
->vdev
, VNIC_DEV_INTR_MODE_UNKNOWN
);
2010 static const struct net_device_ops enic_netdev_dynamic_ops
= {
2011 .ndo_open
= enic_open
,
2012 .ndo_stop
= enic_stop
,
2013 .ndo_start_xmit
= enic_hard_start_xmit
,
2014 .ndo_get_stats
= enic_get_stats
,
2015 .ndo_validate_addr
= eth_validate_addr
,
2016 .ndo_set_multicast_list
= enic_set_multicast_list
,
2017 .ndo_set_mac_address
= enic_set_mac_address_dynamic
,
2018 .ndo_change_mtu
= enic_change_mtu
,
2019 .ndo_vlan_rx_register
= enic_vlan_rx_register
,
2020 .ndo_vlan_rx_add_vid
= enic_vlan_rx_add_vid
,
2021 .ndo_vlan_rx_kill_vid
= enic_vlan_rx_kill_vid
,
2022 .ndo_tx_timeout
= enic_tx_timeout
,
2023 .ndo_set_vf_port
= enic_set_vf_port
,
2024 .ndo_get_vf_port
= enic_get_vf_port
,
2025 #ifdef CONFIG_NET_POLL_CONTROLLER
2026 .ndo_poll_controller
= enic_poll_controller
,
2030 static const struct net_device_ops enic_netdev_ops
= {
2031 .ndo_open
= enic_open
,
2032 .ndo_stop
= enic_stop
,
2033 .ndo_start_xmit
= enic_hard_start_xmit
,
2034 .ndo_get_stats
= enic_get_stats
,
2035 .ndo_validate_addr
= eth_validate_addr
,
2036 .ndo_set_multicast_list
= enic_set_multicast_list
,
2037 .ndo_set_mac_address
= enic_set_mac_address
,
2038 .ndo_change_mtu
= enic_change_mtu
,
2039 .ndo_vlan_rx_register
= enic_vlan_rx_register
,
2040 .ndo_vlan_rx_add_vid
= enic_vlan_rx_add_vid
,
2041 .ndo_vlan_rx_kill_vid
= enic_vlan_rx_kill_vid
,
2042 .ndo_tx_timeout
= enic_tx_timeout
,
2043 #ifdef CONFIG_NET_POLL_CONTROLLER
2044 .ndo_poll_controller
= enic_poll_controller
,
2048 void enic_dev_deinit(struct enic
*enic
)
2050 netif_napi_del(&enic
->napi
);
2051 enic_free_vnic_resources(enic
);
2052 enic_clear_intr_mode(enic
);
2055 int enic_dev_init(struct enic
*enic
)
2057 struct net_device
*netdev
= enic
->netdev
;
2060 /* Get vNIC configuration
2063 err
= enic_get_vnic_config(enic
);
2066 "Get vNIC configuration failed, aborting.\n");
2070 /* Get available resource counts
2073 enic_get_res_counts(enic
);
2075 /* Set interrupt mode based on resource counts and system
2079 err
= enic_set_intr_mode(enic
);
2082 "Failed to set intr mode based on resource "
2083 "counts and system capabilities, aborting.\n");
2087 /* Allocate and configure vNIC resources
2090 err
= enic_alloc_vnic_resources(enic
);
2093 "Failed to alloc vNIC resources, aborting.\n");
2094 goto err_out_free_vnic_resources
;
2097 enic_init_vnic_resources(enic
);
2099 err
= enic_set_rq_alloc_buf(enic
);
2102 "Failed to set RQ buffer allocator, aborting.\n");
2103 goto err_out_free_vnic_resources
;
2106 err
= enic_set_niccfg(enic
);
2109 "Failed to config nic, aborting.\n");
2110 goto err_out_free_vnic_resources
;
2113 switch (vnic_dev_get_intr_mode(enic
->vdev
)) {
2115 netif_napi_add(netdev
, &enic
->napi
, enic_poll
, 64);
2117 case VNIC_DEV_INTR_MODE_MSIX
:
2118 netif_napi_add(netdev
, &enic
->napi
, enic_poll_msix
, 64);
2124 err_out_free_vnic_resources
:
2125 enic_clear_intr_mode(enic
);
2126 enic_free_vnic_resources(enic
);
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
2140 static int __devinit
enic_probe(struct pci_dev
*pdev
,
2141 const struct pci_device_id
*ent
)
2143 struct net_device
*netdev
;
2149 /* Allocate net device structure and initialize. Private
2150 * instance data is initialized to zero.
2153 netdev
= alloc_etherdev(sizeof(struct enic
));
2155 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
2159 pci_set_drvdata(pdev
, netdev
);
2161 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
2163 enic
= netdev_priv(netdev
);
2164 enic
->netdev
= netdev
;
2167 /* Setup PCI resources
2170 err
= pci_enable_device(pdev
);
2173 "Cannot enable PCI device, aborting.\n");
2174 goto err_out_free_netdev
;
2177 err
= pci_request_regions(pdev
, DRV_NAME
);
2180 "Cannot request PCI regions, aborting.\n");
2181 goto err_out_disable_device
;
2184 pci_set_master(pdev
);
2186 /* Query PCI controller on system for DMA addressing
2187 * limitation for the device. Try 40-bit first, and
2191 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(40));
2193 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
2196 "No usable DMA configuration, aborting.\n");
2197 goto err_out_release_regions
;
2199 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(32));
2202 "Unable to obtain 32-bit DMA "
2203 "for consistent allocations, aborting.\n");
2204 goto err_out_release_regions
;
2207 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(40));
2210 "Unable to obtain 40-bit DMA "
2211 "for consistent allocations, aborting.\n");
2212 goto err_out_release_regions
;
2217 /* Map vNIC resources from BAR0-5
2220 for (i
= 0; i
< ARRAY_SIZE(enic
->bar
); i
++) {
2221 if (!(pci_resource_flags(pdev
, i
) & IORESOURCE_MEM
))
2223 enic
->bar
[i
].len
= pci_resource_len(pdev
, i
);
2224 enic
->bar
[i
].vaddr
= pci_iomap(pdev
, i
, enic
->bar
[i
].len
);
2225 if (!enic
->bar
[i
].vaddr
) {
2227 "Cannot memory-map BAR %d, aborting.\n", i
);
2229 goto err_out_iounmap
;
2231 enic
->bar
[i
].bus_addr
= pci_resource_start(pdev
, i
);
2234 /* Register vNIC device
2237 enic
->vdev
= vnic_dev_register(NULL
, enic
, pdev
, enic
->bar
,
2238 ARRAY_SIZE(enic
->bar
));
2241 "vNIC registration failed, aborting.\n");
2243 goto err_out_iounmap
;
2246 /* Issue device open to get device in known state
2249 err
= enic_dev_open(enic
);
2252 "vNIC dev open failed, aborting.\n");
2253 goto err_out_vnic_unregister
;
2256 /* Issue device init to initialize the vnic-to-switch link.
2257 * We'll start with carrier off and wait for link UP
2258 * notification later to turn on carrier. We don't need
2259 * to wait here for the vnic-to-switch link initialization
2260 * to complete; link UP notification is the indication that
2261 * the process is complete.
2264 netif_carrier_off(netdev
);
2266 if (!enic_is_dynamic(enic
)) {
2267 err
= vnic_dev_init(enic
->vdev
, 0);
2270 "vNIC dev init failed, aborting.\n");
2271 goto err_out_dev_close
;
2275 err
= enic_dev_init(enic
);
2278 "Device initialization failed, aborting.\n");
2279 goto err_out_dev_close
;
2282 /* Setup notification timer, HW reset task, and locks
2285 init_timer(&enic
->notify_timer
);
2286 enic
->notify_timer
.function
= enic_notify_timer
;
2287 enic
->notify_timer
.data
= (unsigned long)enic
;
2289 INIT_WORK(&enic
->reset
, enic_reset
);
2291 for (i
= 0; i
< enic
->wq_count
; i
++)
2292 spin_lock_init(&enic
->wq_lock
[i
]);
2294 spin_lock_init(&enic
->devcmd_lock
);
2296 /* Register net device
2299 enic
->port_mtu
= enic
->config
.mtu
;
2300 (void)enic_change_mtu(netdev
, enic
->port_mtu
);
2302 err
= enic_set_mac_addr(netdev
, enic
->mac_addr
);
2305 "Invalid MAC address, aborting.\n");
2306 goto err_out_dev_deinit
;
2309 enic
->tx_coalesce_usecs
= enic
->config
.intr_timer_usec
;
2310 enic
->rx_coalesce_usecs
= enic
->tx_coalesce_usecs
;
2312 if (enic_is_dynamic(enic
))
2313 netdev
->netdev_ops
= &enic_netdev_dynamic_ops
;
2315 netdev
->netdev_ops
= &enic_netdev_ops
;
2317 netdev
->watchdog_timeo
= 2 * HZ
;
2318 netdev
->ethtool_ops
= &enic_ethtool_ops
;
2320 netdev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
2321 if (ENIC_SETTING(enic
, TXCSUM
))
2322 netdev
->features
|= NETIF_F_SG
| NETIF_F_HW_CSUM
;
2323 if (ENIC_SETTING(enic
, TSO
))
2324 netdev
->features
|= NETIF_F_TSO
|
2325 NETIF_F_TSO6
| NETIF_F_TSO_ECN
;
2326 if (ENIC_SETTING(enic
, LRO
))
2327 netdev
->features
|= NETIF_F_GRO
;
2329 netdev
->features
|= NETIF_F_HIGHDMA
;
2331 enic
->csum_rx_enabled
= ENIC_SETTING(enic
, RXCSUM
);
2333 err
= register_netdev(netdev
);
2336 "Cannot register net device, aborting.\n");
2337 goto err_out_dev_deinit
;
2343 enic_dev_deinit(enic
);
2345 vnic_dev_close(enic
->vdev
);
2346 err_out_vnic_unregister
:
2347 vnic_dev_unregister(enic
->vdev
);
2350 err_out_release_regions
:
2351 pci_release_regions(pdev
);
2352 err_out_disable_device
:
2353 pci_disable_device(pdev
);
2354 err_out_free_netdev
:
2355 pci_set_drvdata(pdev
, NULL
);
2356 free_netdev(netdev
);
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);