/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <net/ip6_checksum.h>
40 #include "cq_enet_desc.h"
42 #include "vnic_intr.h"
43 #include "vnic_stats.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
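/* ENIC_DESC_MAX_SPLITS is the worst case: one MAX_TSO (64 KB) linear
 * buffer carved into WQ_ENET_MAX_DESC_LEN-sized descriptors, plus one
 * descriptor for the remainder.
 */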
#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ 0, }	/* end of table */
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
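/* .offset is an index into an array of u64 counters, not a byte offset,
 * hence the divide by 8; the vnic_tx_stats/vnic_rx_stats structs are
 * laid out as consecutive 64-bit counters.
 */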
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
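/* A dynamic vNIC is provisioned on the switch at runtime through a port
 * profile (see enic_set_port_profile() below) and is distinguished only
 * by its PCI device id.
 */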
static int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ecmd->speed = vnic_dev_port_speed(enic->vdev);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
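/* The vNIC exposes a single devcmd channel to firmware, so every
 * enic_dev_* wrapper below serializes its vnic_dev_* call with
 * devcmd_lock.
 */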
static int enic_dev_fw_info(struct enic *enic,
	struct vnic_devcmd_fw_info **fw_info)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_fw_info(enic->vdev, fw_info);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}
static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_stats_dump(enic->vdev, vstats);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}
static int enic_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, RXCSUM))
		return -EINVAL;

	enic->csum_rx_enabled = !!data;

	return 0;
}
static int enic_set_tx_csum(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TXCSUM))
		return -EINVAL;

	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}
static int enic_set_tso(struct net_device *netdev, u32 data)
{
	struct enic *enic = netdev_priv(netdev);

	if (data && !ENIC_SETTING(enic, TSO))
		return -EINVAL;

	if (data)
		netdev->features |=
			NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	else
		netdev->features &=
			~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);

	return 0;
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;

	tx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->tx_coalesce_usecs);
	rx_coalesce_usecs = min_t(u32,
		INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
		ecmd->rx_coalesce_usecs);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
			INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
		vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
			INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_flags = ethtool_op_get_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
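/* Tx completion servicing: free each completed send buffer and, if the
 * queue was stopped, wake it once enough descriptors are free for a
 * worst-case send (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS).
 */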
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}
static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (mtu < netdev->mtu)
			netdev_warn(netdev,
				"interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				netdev->mtu, mtu);
	}
}
static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
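/* Legacy INTx is shared and level-triggered; the PBA (pending bits
 * array) read below tells which sources (WQ/RQ work, errors, notify)
 * are asserted.
 */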
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(&enic->napi);

	return IRQ_HANDLED;
}
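/* In MSI-X mode, Tx completions are serviced directly in the ISR rather
 * than in NAPI context; credits are returned with unmask and intr timer
 * reset in a single operation.
 */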
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0),	/* EOP? */
			loopback);
	}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
/* dev_base_lock rwlock held, nominally process context */
static struct net_device_stats *enic_get_stats(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static void enic_reset_multicast_list(struct enic *enic)
{
	enic->mc_count = 0;
	enic->flags = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
static int enic_dev_add_station_addr(struct enic *enic)
{
	int err = 0;

	if (is_valid_ether_addr(enic->netdev->dev_addr)) {
		spin_lock(&enic->devcmd_lock);
		err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
		spin_unlock(&enic->devcmd_lock);
	}

	return err;
}
static int enic_dev_del_station_addr(struct enic *enic)
{
	int err = 0;

	if (is_valid_ether_addr(enic->netdev->dev_addr)) {
		spin_lock(&enic->devcmd_lock);
		err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
		spin_unlock(&enic->devcmd_lock);
	}

	return err;
}
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}
static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	return -EOPNOTSUPP;
}
static int enic_dev_packet_filter(struct enic *enic, int directed,
	int multicast, int broadcast, int promisc, int allmulti)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_packet_filter(enic->vdev, directed,
		multicast, broadcast, promisc, allmulti);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_add_addr(enic->vdev, addr);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_del_addr(enic->vdev, addr);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	unsigned int mc_count = netdev_mc_count(netdev);
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	/* Is there an easier way?  Trying to minimize to
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_dev_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}
/* rtnl lock is held */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}
/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}
static int enic_vnic_dev_deinit(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_deinit(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_init_prov(enic->vdev,
		(u8 *)vp, vic_provinfo_size(vp));
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_init_done(struct enic *enic, int *done, int *error)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_init_done(enic->vdev, done, error);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_set_port_profile(struct enic *enic, u8 *mac)
{
	struct vic_provinfo *vp;
	u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
	char uuid_str[38];
	int err;

	err = enic_vnic_dev_deinit(enic);
	if (err)
		return err;

	switch (enic->pp.request) {

	case PORT_REQUEST_ASSOCIATE:

		if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
			return -EINVAL;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		vp = vic_provinfo_alloc(GFP_KERNEL, oui,
			VIC_PROVINFO_LINUX_TYPE);
		if (!vp)
			return -ENOMEM;

		vic_provinfo_add_tlv(vp,
			VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
			strlen(enic->pp.name) + 1, enic->pp.name);

		vic_provinfo_add_tlv(vp,
			VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
			ETH_ALEN, mac);

		if (enic->pp.set & ENIC_SET_INSTANCE) {
			sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
			vic_provinfo_add_tlv(vp,
				VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
				sizeof(uuid_str), uuid_str);
		}

		if (enic->pp.set & ENIC_SET_HOST) {
			sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
			vic_provinfo_add_tlv(vp,
				VIC_LINUX_PROV_TLV_HOST_UUID_STR,
				sizeof(uuid_str), uuid_str);
		}

		err = enic_dev_init_prov(enic, vp);
		vic_provinfo_free(vp);
		if (err)
			return err;
		break;

	case PORT_REQUEST_DISASSOCIATE:
		break;

	default:
		return -EINVAL;
	}

	enic->pp.set |= ENIC_SET_APPLIED;

	return 0;
}
static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);

	memset(&enic->pp, 0, sizeof(enic->pp));

	if (port[IFLA_PORT_REQUEST]) {
		enic->pp.set |= ENIC_SET_REQUEST;
		enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
	}

	if (port[IFLA_PORT_PROFILE]) {
		enic->pp.set |= ENIC_SET_NAME;
		memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		enic->pp.set |= ENIC_SET_INSTANCE;
		memcpy(enic->pp.instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		enic->pp.set |= ENIC_SET_HOST;
		memcpy(enic->pp.host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	/* don't support VFs, yet */
	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	if (!(enic->pp.set & ENIC_SET_REQUEST))
		return -EOPNOTSUPP;

	if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {

		/* If the interface mac addr hasn't been assigned,
		 * assign a random mac addr before setting port-
		 * profile.
		 */

		if (is_zero_ether_addr(netdev->dev_addr))
			random_ether_addr(netdev->dev_addr);
	}

	return enic_set_port_profile(enic, netdev->dev_addr);
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	int err, error, done;
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;

	if (!(enic->pp.set & ENIC_SET_APPLIED))
		return -ENODATA;

	err = enic_dev_init_done(enic, &done, &error);
	if (err)
		error = err;

	switch (error) {
	case 0:
		if (!done)
			response = PORT_PROFILE_RESPONSE_INPROGRESS;
		break;
	case ERR_EINVAL:
		response = PORT_PROFILE_RESPONSE_INVALID;
		break;
	case ERR_EBADSTATE:
		response = PORT_PROFILE_RESPONSE_BADSTATE;
		break;
	case ERR_ENOMEM:
		response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
		break;
	default:
		response = PORT_PROFILE_RESPONSE_ERROR;
		break;
	}

	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
	if (enic->pp.set & ENIC_SET_NAME)
		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
			enic->pp.name);
	if (enic->pp.set & ENIC_SET_INSTANCE)
		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
			enic->pp.instance_uuid);
	if (enic->pp.set & ENIC_SET_HOST)
		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
			enic->pp.host_uuid);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (vnic_rq_posting_soon(rq)) {

		/* SW workaround for A0 HW erratum: if we're just about
		 * to write posted_index, insert a dummy desc
		 * of type resvd
		 */

		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
		vnic_rq_post(rq, 0, 0, 0, 0);
	} else {
		return enic_rq_alloc_buf(rq);
	}

	return 0;
}
static int enic_dev_hw_version(struct enic *enic,
	enum vnic_dev_hw_version *hw_ver)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_hw_version(enic->vdev, hw_ver);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = enic_dev_hw_version(enic, &hw_ver);
	if (err)
		return err;

	switch (hw_ver) {
	case VNIC_DEV_HW_VER_A1:
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
		break;
	case VNIC_DEV_HW_VER_A2:
	case VNIC_DEV_HW_VER_UNKNOWN:
		enic->rq_alloc_buf = enic_rq_alloc_buf;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (enic->vlan_group && vlan_stripped &&
		    (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {

			if (netdev->features & NETIF_F_GRO)
				vlan_gro_receive(&enic->napi, enic->vlan_group,
					vlan_tci, skb);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan_tci);

		} else {

			if (netdev->features & NETIF_F_GRO)
				napi_gro_receive(&enic->napi, skb);
			else
				netif_receive_skb(skb);
		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
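/* In MSI mode there is no separate notify interrupt, so a periodic timer
 * polls the notify area instead (see enic_notify_timer_start()).
 */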
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_notify_unset(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_enable(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_disable(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	enic_dev_add_station_addr(enic);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);
	napi_disable(&enic->napi);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
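/* Kick off a devcmd with start() and then poll finished() every 100 ms,
 * for at most 2 seconds, sleeping uninterruptibly between polls.
 */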
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}
static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
static int enic_set_niccfg(struct enic *enic)
{
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.  RSS not enabled (yet).
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_dev_hang_notify(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_multicast_list(enic);
	enic_init_vnic_resources(enic);
	enic_set_niccfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = 1;
	unsigned int m = 1;
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = enic_set_multicast_list,
	.ndo_set_mac_address = enic_set_mac_address_dynamic,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
	.ndo_set_vf_port = enic_set_vf_port,
	.ndo_get_vf_port = enic_get_vf_port,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open = enic_open,
	.ndo_stop = enic_stop,
	.ndo_start_xmit = enic_hard_start_xmit,
	.ndo_get_stats = enic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = enic_set_mac_address,
	.ndo_set_multicast_list = enic_set_multicast_list,
	.ndo_change_mtu = enic_change_mtu,
	.ndo_vlan_rx_register = enic_vlan_rx_register,
	.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
	.ndo_tx_timeout = enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = enic_poll_controller,
#endif
};
void enic_dev_deinit(struct enic *enic)
{
	netif_napi_del(&enic->napi);
	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}
static int enic_dev_stats_clear(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_stats_clear(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	int err;

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	/* Clear LIF stats
	 */
	enic_dev_stats_clear(enic);

	err = enic_set_rq_alloc_buf(enic);
	if (err) {
		dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_set_niccfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_GRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};
static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);