/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"
#include "gmii.h"
#include "ethtool.h"
#include "tx.h"
#include "rx.h"
#include "efx.h"
#include "mdio_10g.h"
#include "falcon.h"
#include "mac.h"
#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode, as only a single
 * interrupt vector is available in that mode.
 */
static unsigned int separate_tx_and_rx_channels = true;

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;
/* This controls whether or not the hardware monitor will trigger a
 * reset when it detects an error condition.
 */
static unsigned int monitor_reset = true;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;
/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)                \
        do {                                            \
                if ((efx->state == STATE_RUNNING) ||    \
                    (efx->state == STATE_RESETTING))    \
                        ASSERT_RTNL();                  \
        } while (0)
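
/* Added commentary (not in the original file): the do { ... } while (0)
 * wrapper makes the macro above expand to a single statement, so it is
 * safe to use in unbraced contexts such as
 *
 *      if (check)
 *              EFX_ASSERT_RESET_SERIALISED(efx);
 *      else
 *              ...;
 *
 * ASSERT_RTNL() then fires only in the states where reset paths must be
 * serialised by the rtnl lock.
 */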

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
        int rxdmaqs;
        struct efx_rx_queue *rx_queue;

        if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
                     !channel->enabled))
                return rx_quota;

        rxdmaqs = falcon_process_eventq(channel, &rx_quota);

        /* Deliver last RX packet. */
        if (channel->rx_pkt) {
                __efx_rx_packet(channel, channel->rx_pkt,
                                channel->rx_pkt_csummed);
                channel->rx_pkt = NULL;
        }

        efx_flush_lro(channel);
        efx_rx_strategy(channel);

        /* Refill descriptor rings as necessary; rxdmaqs is a bitmask of
         * the RX queues that need refilling */
        rx_queue = &channel->efx->rx_queue[0];
        while (rxdmaqs) {
                if (rxdmaqs & 0x01)
                        efx_fast_push_rx_descriptors(rx_queue);
                rx_queue++;
                rxdmaqs >>= 1;
        }

        return rx_quota;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
        /* The interrupt handler for this channel may set work_pending
         * as soon as we acknowledge the events we've seen. Make sure
         * it's cleared before then. */
        channel->work_pending = false;
        smp_wmb();

        falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
        struct efx_channel *channel =
                container_of(napi, struct efx_channel, napi_str);
        struct net_device *napi_dev = channel->napi_dev;
        int unused;
        int rx_packets;

        EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
                  channel->channel, raw_smp_processor_id());

        unused = efx_process_channel(channel, budget);
        rx_packets = (budget - unused);

        if (rx_packets < budget) {
                /* There is no race here; although napi_disable() will
                 * only wait for netif_rx_complete(), this isn't a problem
                 * since efx_channel_processed() will have no effect if
                 * interrupts have already been disabled.
                 */
                netif_rx_complete(napi_dev, napi);
                efx_channel_processed(channel);
        }

        return rx_packets;
}
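
/* Added commentary (not in the original file): the NAPI contract is
 * that a poll handler which returns less than its budget must complete
 * NAPI exactly once (netif_rx_complete() above), while returning the
 * full budget means "more work pending" and the handler will be
 * re-polled with the device interrupt still masked. efx_poll() derives
 * its return value from the unused RX quota, so "budget - unused" is
 * the number of RX packets actually processed this pass.
 */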

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally re-enable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;

        BUG_ON(!channel->used_flags);
        BUG_ON(!channel->enabled);

        /* Disable interrupts and wait for ISRs to complete */
        falcon_disable_interrupts(efx);
        if (efx->legacy_irq)
                synchronize_irq(efx->legacy_irq);
        if (channel->irq)
                synchronize_irq(channel->irq);

        /* Wait for any NAPI processing to complete */
        napi_disable(&channel->napi_str);

        /* Poll the channel */
        efx_process_channel(channel, efx->type->evq_size);

        /* Ack the eventq. This may cause an interrupt to be generated
         * when they are reenabled */
        efx_channel_processed(channel);

        napi_enable(&channel->napi_str);
        falcon_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

        return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

        channel->eventq_read_ptr = 0;

        return falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

        falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
        EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

        falcon_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        int rc;

        EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

        rc = efx_probe_eventq(channel);
        if (rc)
                goto fail1;

        efx_for_each_channel_tx_queue(tx_queue, channel) {
                rc = efx_probe_tx_queue(tx_queue);
                if (rc)
                        goto fail2;
        }

        efx_for_each_channel_rx_queue(rx_queue, channel) {
                rc = efx_probe_rx_queue(rx_queue);
                if (rc)
                        goto fail3;
        }

        channel->n_rx_frm_trunc = 0;

        return 0;

 fail3:
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
 fail2:
        efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
 fail1:
        return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static int efx_init_channels(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;
        int rc = 0;

        /* Calculate the rx buffer allocation parameters required to
         * support the current MTU, including padding for header
         * alignment and overruns.
         */
        efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
                              EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                              efx->type->rx_buffer_padding);
        efx->rx_buffer_order = get_order(efx->rx_buffer_len);

        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
                EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

                rc = efx_init_eventq(channel);
                if (rc)
                        goto err;

                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        rc = efx_init_tx_queue(tx_queue);
                        if (rc)
                                goto err;
                }

                /* The rx buffer allocation strategy is MTU dependent */
                efx_rx_strategy(channel);

                efx_for_each_channel_rx_queue(rx_queue, channel) {
                        rc = efx_init_rx_queue(rx_queue);
                        if (rc)
                                goto err;
                }

                WARN_ON(channel->rx_pkt != NULL);
                efx_rx_strategy(channel);
        }

        return 0;

 err:
        EFX_ERR(efx, "failed to initialise channel %d\n",
                channel ? channel->channel : -1);
        efx_fini_channels(efx);
        return rc;
}
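
/* Added commentary (not in the original file): get_order() returns the
 * smallest page order n such that (PAGE_SIZE << n) can hold the given
 * length. For example, with 4 KiB pages the rx_buffer_len computed for
 * a standard 1500-byte MTU fits in a single page (order 0), while a
 * 9000-byte jumbo MTU needs order 2 (16 KiB).
 */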

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
        struct efx_rx_queue *rx_queue;

        EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

        if (!(channel->efx->net_dev->flags & IFF_UP))
                netif_napi_add(channel->napi_dev, &channel->napi_str,
                               efx_poll, napi_weight);

        /* The interrupt handler for this channel may set work_pending
         * as soon as we enable it. Make sure it's cleared before
         * then. Similarly, make sure it sees the enabled flag set. */
        channel->work_pending = false;
        channel->enabled = true;
        smp_wmb();

        napi_enable(&channel->napi_str);

        /* Load up RX descriptors */
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
        struct efx_rx_queue *rx_queue;

        if (!channel->enabled)
                return;

        EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

        channel->enabled = false;
        napi_disable(&channel->napi_str);

        /* Ensure that any worker threads have exited or will be no-ops */
        efx_for_each_channel_rx_queue(rx_queue, channel) {
                spin_lock_bh(&rx_queue->add_lock);
                spin_unlock_bh(&rx_queue->add_lock);
        }
}
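
/* Added commentary (not in the original file): the empty lock/unlock
 * pair above is a synchronisation barrier, not a data access. A refill
 * worker that is already running holds add_lock, so taking and
 * immediately releasing it waits for that worker to finish; workers
 * started afterwards observe channel->enabled == false and do nothing.
 */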

static void efx_fini_channels(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        EFX_ASSERT_RESET_SERIALISED(efx);
        BUG_ON(efx->port_enabled);

        efx_for_each_channel(channel, efx) {
                EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_fini_rx_queue(rx_queue);
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
        }

        /* Do the event queues last so that we can handle flush events
         * for all DMA queues. */
        efx_for_each_channel(channel, efx) {
                EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);

                efx_fini_eventq(channel);
        }
}

static void efx_remove_channel(struct efx_channel *channel)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
        efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);

        channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
        queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}
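
/* Added commentary (not in the original file): this is the slow half of
 * the RX refill strategy described at the top of the file. When the
 * fast path cannot allocate RX buffers, the queue's delayed work item
 * is pushed onto the shared refill_workqueue and the allocation is
 * retried "delay" jiffies later from process context. A hypothetical
 * caller might look like:
 *
 *      efx_schedule_slow_fill(rx_queue, msecs_to_jiffies(10));
 */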

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and thereby keeps the
 * port's TX queue stopped while the link is down.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
        /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
         * that no events are triggered between unregister_netdev() and the
         * driver unloading. A more general condition is that NETDEV_CHANGE
         * can only be generated between NETDEV_UP and NETDEV_DOWN */
        if (!netif_running(efx->net_dev))
                return;

        if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
                efx->n_link_state_changes++;

                if (efx->link_up)
                        netif_carrier_on(efx->net_dev);
                else
                        netif_carrier_off(efx->net_dev);
        }

        /* Status message for kernel log */
        if (efx->link_up) {
                struct mii_if_info *gmii = &efx->mii;
                unsigned adv, lpa;

                /* NONE here means direct XAUI from the controller, with no
                 * MDIO-attached device we can query. */
                if (efx->phy_type != PHY_TYPE_NONE) {
                        adv = gmii_advertised(gmii);
                        lpa = gmii_lpa(gmii);
                } else {
                        lpa = GM_LPA_10000 | LPA_DUPLEX;
                        adv = lpa;
                }
                EFX_INFO(efx, "link up at %dMbps %s-duplex "
                         "(adv %04x lpa %04x) (MTU %d)%s\n",
                         (efx->link_options & GM_LPA_10000 ? 10000 :
                          (efx->link_options & GM_LPA_1000 ? 1000 :
                           (efx->link_options & GM_LPA_100 ? 100 :
                            10))),
                         (efx->link_options & GM_LPA_DUPLEX ?
                          "full" : "half"),
                         adv, lpa, efx->net_dev->mtu,
                         (efx->promiscuous ? " [PROMISC]" : ""));
        } else {
                EFX_INFO(efx, "link down\n");
        }
}

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
static void __efx_reconfigure_port(struct efx_nic *efx)
{
        WARN_ON(!mutex_is_locked(&efx->mac_lock));

        EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
                raw_smp_processor_id());

        falcon_reconfigure_xmac(efx);

        /* Inform kernel of loss/gain of carrier */
        efx_link_status_changed(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        mutex_lock(&efx->mac_lock);
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't call efx_reconfigure_port() if the port is disabled. Care is
 * taken in efx_stop_all() and efx_start_port() to prevent PHY events
 * being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic,
                                           reconfigure_work);

        mutex_lock(&efx->mac_lock);
        if (efx->port_enabled)
                __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
        int rc;

        EFX_LOG(efx, "create port\n");

        /* Connect up MAC/PHY operations table and read MAC address */
        rc = falcon_probe_port(efx);
        if (rc)
                goto err;

        /* Sanity check MAC address */
        if (is_valid_ether_addr(efx->mac_address)) {
                memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
        } else {
                DECLARE_MAC_BUF(mac);

                EFX_ERR(efx, "invalid MAC address %s\n",
                        print_mac(mac, efx->mac_address));
                if (!allow_bad_hwaddr) {
                        rc = -EINVAL;
                        goto err;
                }
                random_ether_addr(efx->net_dev->dev_addr);
                EFX_INFO(efx, "using locally-generated MAC %s\n",
                         print_mac(mac, efx->net_dev->dev_addr));
        }

        return 0;

 err:
        efx_remove_port(efx);
        return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
        int rc;

        EFX_LOG(efx, "init port\n");

        /* Initialise the MAC and PHY */
        rc = falcon_init_xmac(efx);
        if (rc)
                return rc;

        efx->port_initialized = true;

        /* Reconfigure port to program MAC registers */
        falcon_reconfigure_xmac(efx);

        return 0;
}

/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "start port\n");
        BUG_ON(efx->port_enabled);

        mutex_lock(&efx->mac_lock);
        efx->port_enabled = true;
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "stop port\n");

        mutex_lock(&efx->mac_lock);
        efx->port_enabled = false;
        mutex_unlock(&efx->mac_lock);

        /* Serialise against efx_set_multicast_list() */
        if (efx_dev_registered(efx)) {
                netif_addr_lock_bh(efx->net_dev);
                netif_addr_unlock_bh(efx->net_dev);
        }
}

static void efx_fini_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "shut down port\n");

        if (!efx->port_initialized)
                return;

        falcon_fini_xmac(efx);
        efx->port_initialized = false;

        efx->link_up = false;
        efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
        EFX_LOG(efx, "destroying port\n");

        falcon_remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
        int rc;

        EFX_LOG(efx, "initialising I/O\n");

        rc = pci_enable_device(pci_dev);
        if (rc) {
                EFX_ERR(efx, "failed to enable PCI device\n");
                goto fail1;
        }

        pci_set_master(pci_dev);

        /* Set the PCI DMA mask. Try all possibilities from our
         * genuine mask down to 32 bits, because some architectures
         * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
         * masks even though they reject 46 bit masks.
         */
        while (dma_mask > 0x7fffffffUL) {
                if (pci_dma_supported(pci_dev, dma_mask) &&
                    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
                        break;
                dma_mask >>= 1;
        }
        if (rc) {
                EFX_ERR(efx, "could not find a suitable DMA mask\n");
                goto fail2;
        }
        EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
        rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
        if (rc) {
                /* pci_set_consistent_dma_mask() is not *allowed* to
                 * fail with a mask that pci_set_dma_mask() accepted,
                 * but just in case...
                 */
                EFX_ERR(efx, "failed to set consistent DMA mask\n");
                goto fail2;
        }

        efx->membase_phys = pci_resource_start(efx->pci_dev,
                                               efx->type->mem_bar);
        rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
        if (rc) {
                EFX_ERR(efx, "request for memory BAR failed\n");
                rc = -EIO;
                goto fail3;
        }
        efx->membase = ioremap_nocache(efx->membase_phys,
                                       efx->type->mem_map_size);
        if (!efx->membase) {
                EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
                        efx->type->mem_bar,
                        (unsigned long long)efx->membase_phys,
                        efx->type->mem_map_size);
                rc = -ENOMEM;
                goto fail4;
        }
        EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
                efx->type->mem_bar, (unsigned long long)efx->membase_phys,
                efx->type->mem_map_size, efx->membase);

        return 0;

 fail4:
        release_mem_region(efx->membase_phys, efx->type->mem_map_size);
 fail3:
        efx->membase_phys = 0;
 fail2:
        pci_disable_device(efx->pci_dev);
 fail1:
        return rc;
}
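
/* Added commentary (not in the original file): Falcon advertises a
 * 46-bit max_dma_mask, so the loop above probes 0x3fffffffffff,
 * 0x1fffffffffff, ... halving the mask each time until the platform
 * accepts one, and stops before it would drop below the 32-bit mask
 * 0xffffffff that every PCI platform must support.
 */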

static void efx_fini_io(struct efx_nic *efx)
{
        EFX_LOG(efx, "shutting down I/O\n");

        if (efx->membase) {
                iounmap(efx->membase);
                efx->membase = NULL;
        }

        if (efx->membase_phys) {
                pci_release_region(efx->pci_dev, efx->type->mem_bar);
                efx->membase_phys = 0;
        }

        pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
        cpumask_t core_mask;
        int count;
        int cpu;

        cpus_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
                if (!cpu_isset(cpu, core_mask)) {
                        ++count;
                        cpus_or(core_mask, core_mask,
                                topology_core_siblings(cpu));
                }
        }

        return count;
}
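
/* Added commentary (not in the original file): core_mask accumulates
 * every package sibling of each CPU already counted, so each package
 * is counted exactly once. On a two-socket box with two hyperthreaded
 * cores per socket (8 logical CPUs), CPU0 marks all of socket 0's
 * siblings, CPU4 marks socket 1's, and the function returns 2.
 */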

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
        int max_channels =
                min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
        int rc, i;

        if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
                struct msix_entry xentries[EFX_MAX_CHANNELS];
                int wanted_ints;

                /* We want one RX queue and interrupt per CPU package
                 * (or as specified by the rss_cpus module parameter).
                 * We will need one channel per interrupt.
                 */
                wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
                efx->n_rx_queues = min(wanted_ints, max_channels);

                for (i = 0; i < efx->n_rx_queues; i++)
                        xentries[i].entry = i;
                rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
                if (rc > 0) {
                        EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
                        efx->n_rx_queues = rc;
                        rc = pci_enable_msix(efx->pci_dev, xentries,
                                             efx->n_rx_queues);
                }

                if (rc == 0) {
                        for (i = 0; i < efx->n_rx_queues; i++)
                                efx->channel[i].irq = xentries[i].vector;
                } else {
                        /* Fall back to single channel MSI */
                        efx->interrupt_mode = EFX_INT_MODE_MSI;
                        EFX_ERR(efx, "could not enable MSI-X\n");
                }
        }

        /* Try single interrupt MSI */
        if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
                efx->n_rx_queues = 1;
                rc = pci_enable_msi(efx->pci_dev);
                if (rc == 0) {
                        efx->channel[0].irq = efx->pci_dev->irq;
                } else {
                        EFX_ERR(efx, "could not enable MSI\n");
                        efx->interrupt_mode = EFX_INT_MODE_LEGACY;
                }
        }

        /* Assume legacy interrupts */
        if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
                efx->n_rx_queues = 1;
                efx->legacy_irq = efx->pci_dev->irq;
        }
}
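
/* Added commentary (not in the original file): the function above
 * degrades gracefully: MSI-X with one vector per RX queue, then
 * single-vector MSI, then the legacy INTx line, each step rewriting
 * interrupt_mode so the next "if" picks up where the previous attempt
 * failed. Note the pci_enable_msix() retry: in this kernel a positive
 * return value is the number of vectors actually available, so the
 * request is shrunk to that count and issued once more.
 */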

static void efx_remove_interrupts(struct efx_nic *efx)
{
        struct efx_channel *channel;

        /* Remove MSI/MSI-X interrupts */
        efx_for_each_channel(channel, efx)
                channel->irq = 0;
        pci_disable_msi(efx->pci_dev);
        pci_disable_msix(efx->pci_dev);

        /* Remove legacy interrupt */
        efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        efx_for_each_tx_queue(tx_queue, efx) {
                if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
                        tx_queue->channel = &efx->channel[1];
                else
                        tx_queue->channel = &efx->channel[0];
                tx_queue->channel->used_flags |= EFX_USED_BY_TX;
        }

        efx_for_each_rx_queue(rx_queue, efx) {
                rx_queue->channel = &efx->channel[rx_queue->queue];
                rx_queue->channel->used_flags |= EFX_USED_BY_RX;
        }
}

static int efx_probe_nic(struct efx_nic *efx)
{
        int rc;

        EFX_LOG(efx, "creating NIC\n");

        /* Carry out hardware-type specific initialisation */
        rc = falcon_probe_nic(efx);
        if (rc)
                return rc;

        /* Determine the number of channels and RX queues by trying to hook
         * in MSI-X interrupts. */
        efx_probe_interrupts(efx);

        efx_set_channels(efx);

        /* Initialise the interrupt moderation settings */
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

        return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
        EFX_LOG(efx, "destroying NIC\n");

        efx_remove_interrupts(efx);
        falcon_remove_nic(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        /* Create NIC */
        rc = efx_probe_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to create NIC\n");
                goto fail1;
        }

        /* Create port */
        rc = efx_probe_port(efx);
        if (rc) {
                EFX_ERR(efx, "failed to create port\n");
                goto fail2;
        }

        /* Create channels */
        efx_for_each_channel(channel, efx) {
                rc = efx_probe_channel(channel);
                if (rc) {
                        EFX_ERR(efx, "failed to create channel %d\n",
                                channel->channel);
                        goto fail3;
                }
        }

        return 0;

 fail3:
        efx_for_each_channel(channel, efx)
                efx_remove_channel(channel);
        efx_remove_port(efx);
 fail2:
        efx_remove_nic(efx);
 fail1:
        return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
        struct efx_channel *channel;

        EFX_ASSERT_RESET_SERIALISED(efx);

        /* Check that it is appropriate to restart the interface. All
         * of these flags are safe to read under just the rtnl lock */
        if (efx->port_enabled)
                return;
        if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
                return;
        if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
                return;

        /* Mark the port as enabled so port reconfigurations can start, then
         * restart the transmit interface early so the watchdog timer stops */
        efx_start_port(efx);
        efx_wake_queue(efx);

        efx_for_each_channel(channel, efx)
                efx_start_channel(channel);

        falcon_enable_interrupts(efx);

        /* Start hardware monitor if we're in RUNNING */
        if (efx->state == STATE_RUNNING)
                queue_delayed_work(efx->workqueue, &efx->monitor_work,
                                   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
        struct efx_rx_queue *rx_queue;

        /* Make sure the hardware monitor is stopped */
        cancel_delayed_work_sync(&efx->monitor_work);

        /* Ensure that all RX slow refills are complete. */
        efx_for_each_rx_queue(rx_queue, efx)
                cancel_delayed_work_sync(&rx_queue->work);

        /* Stop scheduled port reconfigurations */
        cancel_work_sync(&efx->reconfigure_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
        struct efx_channel *channel;

        EFX_ASSERT_RESET_SERIALISED(efx);

        /* port_enabled can be read safely under the rtnl lock */
        if (!efx->port_enabled)
                return;

        /* Disable interrupts and wait for ISR to complete */
        falcon_disable_interrupts(efx);
        if (efx->legacy_irq)
                synchronize_irq(efx->legacy_irq);
        efx_for_each_channel(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);
        }

        /* Stop all NAPI processing and synchronous rx refills */
        efx_for_each_channel(channel, efx)
                efx_stop_channel(channel);

        /* Stop all asynchronous port reconfigurations. Since all
         * event processing has already been stopped, there is no
         * window to lose phy events */
        efx_stop_port(efx);

        /* Flush reconfigure_work, refill_workqueue, monitor_work */
        efx_flush_all(efx);

        /* Isolate the MAC from the TX and RX engines, so that queue
         * flushes will complete in a timely fashion. */
        falcon_deconfigure_mac_wrapper(efx);
        falcon_drain_tx_fifo(efx);

        /* Stop the kernel transmit interface late, so the watchdog
         * timer isn't ticking over the flush */
        efx_stop_queue(efx);
        if (efx_dev_registered(efx)) {
                netif_tx_lock_bh(efx->net_dev);
                netif_tx_unlock_bh(efx->net_dev);
        }
}

static void efx_remove_all(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_remove_channel(channel);
        efx_remove_port(efx);
        efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
int efx_flush_queues(struct efx_nic *efx)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        efx_stop_all(efx);

        efx_fini_channels(efx);
        rc = efx_init_channels(efx);
        if (rc) {
                efx_schedule_reset(efx, RESET_TYPE_DISABLE);
                return rc;
        }

        efx_start_all(efx);

        return 0;
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;

        EFX_ASSERT_RESET_SERIALISED(efx);

        efx_for_each_tx_queue(tx_queue, efx)
                tx_queue->channel->irq_moderation = tx_usecs;

        efx_for_each_rx_queue(rx_queue, efx)
                rx_queue->channel->irq_moderation = rx_usecs;
}
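
/* Added commentary (not in the original file): as the module-parameter
 * comments above note, these per-channel values can be changed after
 * module load through the standard ethtool coalescing interface (see
 * efx_ethtool_ops below), with something like:
 *
 *      ethtool -C eth0 rx-usecs 60 tx-usecs 150
 */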

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic,
                                           monitor_work.work);
        int rc = 0;

        EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
                  raw_smp_processor_id());

        /* If the mac_lock is already held then it is likely a port
         * reconfiguration is already in place, which will likely do
         * most of the work of check_hw() anyway. */
        if (!mutex_trylock(&efx->mac_lock)) {
                queue_delayed_work(efx->workqueue, &efx->monitor_work,
                                   efx_monitor_interval);
                return;
        }

        if (efx->port_enabled)
                rc = falcon_check_xmac(efx);
        mutex_unlock(&efx->mac_lock);

        if (rc) {
                if (monitor_reset) {
                        EFX_ERR(efx, "hardware monitor detected a fault: "
                                "triggering reset\n");
                        efx_schedule_reset(efx, RESET_TYPE_MONITOR);
                } else {
                        EFX_ERR(efx, "hardware monitor detected a fault, "
                                "skipping reset\n");
                }
        }

        queue_delayed_work(efx->workqueue, &efx->monitor_work,
                           efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        EFX_ASSERT_RESET_SERIALISED(efx);

        return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;
        int rc;

        efx_for_each_channel(channel, efx) {
                channel->napi_dev = efx->net_dev;
                rc = efx_lro_init(&channel->lro_mgr, efx);
                if (rc)
                        goto err;
        }
        return 0;

 err:
        efx_fini_napi(efx);
        return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx) {
                efx_lro_fini(&channel->lro_mgr);
                channel->napi_dev = NULL;
        }
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        EFX_ASSERT_RESET_SERIALISED(efx);

        EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
                raw_smp_processor_id());

        efx_start_all(efx);
        return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;

        EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
                raw_smp_processor_id());

        /* Stop the device and flush all the channels */
        efx_stop_all(efx);
        efx_fini_channels(efx);
        rc = efx_init_channels(efx);
        if (rc)
                efx_schedule_reset(efx, RESET_TYPE_DISABLE);

        return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_mac_stats *mac_stats = &efx->mac_stats;
        struct net_device_stats *stats = &net_dev->stats;

        /* Update stats if possible, but do not wait if another thread
         * is updating them (or resetting the NIC); slightly stale
         * stats are acceptable.
         */
        if (!spin_trylock(&efx->stats_lock))
                return stats;
        if (efx->state == STATE_RUNNING) {
                falcon_update_stats_xmac(efx);
                falcon_update_nic_stats(efx);
        }
        spin_unlock(&efx->stats_lock);

        stats->rx_packets = mac_stats->rx_packets;
        stats->tx_packets = mac_stats->tx_packets;
        stats->rx_bytes = mac_stats->rx_bytes;
        stats->tx_bytes = mac_stats->tx_bytes;
        stats->multicast = mac_stats->rx_multicast;
        stats->collisions = mac_stats->tx_collision;
        stats->rx_length_errors = (mac_stats->rx_gtjumbo +
                                   mac_stats->rx_length_error);
        stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
        stats->rx_crc_errors = mac_stats->rx_bad;
        stats->rx_frame_errors = mac_stats->rx_align_error;
        stats->rx_fifo_errors = mac_stats->rx_overflow;
        stats->rx_missed_errors = mac_stats->rx_missed;
        stats->tx_window_errors = mac_stats->tx_late_collision;

        stats->rx_errors = (stats->rx_length_errors +
                            stats->rx_over_errors +
                            stats->rx_crc_errors +
                            stats->rx_frame_errors +
                            stats->rx_fifo_errors +
                            stats->rx_missed_errors +
                            mac_stats->rx_symbol_error);
        stats->tx_errors = (stats->tx_window_errors +
                            mac_stats->tx_bad);

        return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
                atomic_read(&efx->netif_stop_count), efx->port_enabled,
                monitor_reset ? "resetting channels" : "skipping reset");

        if (monitor_reset)
                efx_schedule_reset(efx, RESET_TYPE_MONITOR);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc = 0;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;

        efx_stop_all(efx);

        EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

        efx_fini_channels(efx);
        net_dev->mtu = new_mtu;
        rc = efx_init_channels(efx);
        if (rc)
                goto fail;

        efx_start_all(efx);
        return rc;

 fail:
        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
        return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
        char *new_addr = addr->sa_data;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (!is_valid_ether_addr(new_addr)) {
                DECLARE_MAC_BUF(mac);
                EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
                        print_mac(mac, new_addr));
                return -EINVAL;
        }

        memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

        /* Reconfigure the MAC */
        efx_reconfigure_port(efx);

        return 0;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct dev_mc_list *mc_list = net_dev->mc_list;
        union efx_multicast_hash *mc_hash = &efx->multicast_hash;
        bool promiscuous;
        u32 crc;
        int bit;
        int i;

        /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
        promiscuous = !!(net_dev->flags & IFF_PROMISC);
        if (efx->promiscuous != promiscuous) {
                efx->promiscuous = promiscuous;
                /* Close the window between efx_stop_port() and efx_flush_all()
                 * by only queuing work when the port is enabled. */
                if (efx->port_enabled)
                        queue_work(efx->workqueue, &efx->reconfigure_work);
        }

        /* Build multicast hash table */
        if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
                memset(mc_hash, 0xff, sizeof(*mc_hash));
        } else {
                memset(mc_hash, 0x00, sizeof(*mc_hash));
                for (i = 0; i < net_dev->mc_count; i++) {
                        crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
                        bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
                        set_bit_le(bit, mc_hash->byte);
                        mc_list = mc_list->next;
                }
        }

        /* Create and activate new global multicast hash table */
        falcon_set_multicast_hash(efx);
}
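
/* Added commentary (not in the original file): a worked example of the
 * hash above. For the all-hosts group 01:00:5e:00:00:01,
 * ether_crc_le() computes a little-endian CRC-32 over the six address
 * bytes; the low bits of that CRC, masked with
 * (EFX_MCAST_HASH_ENTRIES - 1), select one bit in the hardware hash
 * register, and any received multicast frame whose address hashes to a
 * set bit is accepted. False positives are possible (two groups can
 * share a bit); exact filtering is left to the stack.
 */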

static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = ptr;

        if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
                struct efx_nic *efx = netdev_priv(net_dev);

                strcpy(efx->name, net_dev->name);
        }

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};

static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->open = efx_net_open;
        net_dev->stop = efx_net_stop;
        net_dev->get_stats = efx_net_stats;
        net_dev->tx_timeout = &efx_watchdog;
        net_dev->hard_start_xmit = efx_hard_start_xmit;
        net_dev->do_ioctl = efx_ioctl;
        net_dev->change_mtu = efx_change_mtu;
        net_dev->set_mac_address = efx_set_mac_address;
        net_dev->set_multicast_list = efx_set_multicast_list;
#ifdef CONFIG_NET_POLL_CONTROLLER
        net_dev->poll_controller = efx_netpoll;
#endif
        SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
        SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(efx->net_dev);

        /* Clear MAC statistics */
        falcon_update_stats_xmac(efx);
        memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

        rc = register_netdev(net_dev);
        if (rc) {
                EFX_ERR(efx, "could not register net dev\n");
                return rc;
        }
        strcpy(efx->name, net_dev->name);

        return 0;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
        struct efx_tx_queue *tx_queue;

        if (!efx->net_dev)
                return;

        BUG_ON(netdev_priv(efx->net_dev) != efx);

        /* Free up any skbs still remaining. This has to happen before
         * we try to unregister the netdev as running their destructors
         * may be needed to get the device ref. count to 0. */
        efx_for_each_tx_queue(tx_queue, efx)
                efx_release_tx_buffers(tx_queue);

        if (efx_dev_registered(efx)) {
                strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                unregister_netdev(efx->net_dev);
        }
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* The final hardware and software finalisation before reset. */
static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        rc = falcon_xmac_get_settings(efx, ecmd);
        if (rc) {
                EFX_ERR(efx, "could not back up PHY settings\n");
                return rc;
        }

        efx_fini_channels(efx);
        return 0;
}

/* The first part of software initialisation after a hardware reset
 * This function does not handle serialisation with the kernel, it
 * assumes the caller has done this */
static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
        int rc;

        rc = efx_init_channels(efx);
        if (rc)
                return rc;

        /* Restore MAC and PHY settings. */
        rc = falcon_xmac_set_settings(efx, ecmd);
        if (rc) {
                EFX_ERR(efx, "could not restore PHY settings\n");
                efx_fini_channels(efx);
        }

        return rc;
}

/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
        struct ethtool_cmd ecmd;
        enum reset_type method = efx->reset_pending;
        int rc;

        /* Serialise with kernel interfaces */
        rtnl_lock();

        /* If we're not RUNNING then don't reset. Leave the reset_pending
         * flag set so that efx_pci_probe_main will be retried */
        if (efx->state != STATE_RUNNING) {
                EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
                goto unlock_rtnl;
        }

        efx->state = STATE_RESETTING;
        EFX_INFO(efx, "resetting (%d)\n", method);

        /* The net_dev->get_stats handler is quite slow, and will fail
         * if a fetch is pending over reset. Serialise against it. */
        spin_lock(&efx->stats_lock);
        spin_unlock(&efx->stats_lock);

        efx_stop_all(efx);
        mutex_lock(&efx->mac_lock);

        rc = efx_reset_down(efx, &ecmd);
        if (rc)
                goto fail;

        rc = falcon_reset_hw(efx, method);
        if (rc) {
                EFX_ERR(efx, "failed to reset hardware\n");
                goto fail;
        }

        /* Allow resets to be rescheduled. */
        efx->reset_pending = RESET_TYPE_NONE;

        /* Reinitialise bus-mastering, which may have been turned off before
         * the reset was scheduled. This is still appropriate, even in the
         * RESET_TYPE_DISABLE since this driver generally assumes the hardware
         * can respond to requests. */
        pci_set_master(efx->pci_dev);

        /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
         * case so the driver can talk to external SRAM */
        rc = falcon_init_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise NIC\n");
                goto fail;
        }

        /* Leave device stopped if necessary */
        if (method == RESET_TYPE_DISABLE) {
                /* Reinitialise the device anyway so the driver unload sequence
                 * can talk to the external SRAM */
                falcon_init_nic(efx);
                rc = -EIO;
                goto fail;
        }

        rc = efx_reset_up(efx, &ecmd);
        if (rc)
                goto fail;

        mutex_unlock(&efx->mac_lock);
        EFX_LOG(efx, "reset complete\n");

        efx->state = STATE_RUNNING;
        efx_start_all(efx);

 unlock_rtnl:
        rtnl_unlock();
        return 0;

 fail:
        EFX_ERR(efx, "has been disabled\n");
        efx->state = STATE_DISABLED;

        mutex_unlock(&efx->mac_lock);
        efx_unregister_netdev(efx);
        efx_fini_port(efx);
        rtnl_unlock();
        return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
        struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

        efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
        enum reset_type method;

        if (efx->reset_pending != RESET_TYPE_NONE) {
                EFX_INFO(efx, "quenching already scheduled reset\n");
                return;
        }

        switch (type) {
        case RESET_TYPE_INVISIBLE:
        case RESET_TYPE_ALL:
        case RESET_TYPE_WORLD:
        case RESET_TYPE_DISABLE:
                method = type;
                break;
        case RESET_TYPE_RX_RECOVERY:
        case RESET_TYPE_RX_DESC_FETCH:
        case RESET_TYPE_TX_DESC_FETCH:
        case RESET_TYPE_TX_SKIP:
                method = RESET_TYPE_INVISIBLE;
                break;
        default:
                method = RESET_TYPE_ALL;
                break;
        }

        if (method != type)
                EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
        else
                EFX_LOG(efx, "scheduling reset (%d)\n", method);

        efx->reset_pending = method;

        queue_work(efx->reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
        {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
         .driver_data = (unsigned long) &falcon_a_nic_type},
        {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
         .driver_data = (unsigned long) &falcon_b_nic_type},
        {0}                     /* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used where the MAC does not implement this operation
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
        return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_phy_operations efx_dummy_phy_operations = {
        .init            = efx_port_dummy_op_int,
        .reconfigure     = efx_port_dummy_op_void,
        .check_hw        = efx_port_dummy_op_int,
        .fini            = efx_port_dummy_op_void,
        .clear_interrupt = efx_port_dummy_op_void,
        .reset_xaui      = efx_port_dummy_op_void,
};

/* Dummy board operations */
static int efx_nic_dummy_op_int(struct efx_nic *nic)
{
        return 0;
}

static struct efx_board efx_dummy_board_info = {
        .init           = efx_nic_dummy_op_int,
        .init_leds      = efx_port_dummy_op_int,
        .set_fault_led  = efx_port_dummy_op_blink,
        .fini           = efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        int i, rc;

        /* Initialise common structures */
        memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
        spin_lock_init(&efx->phy_lock);
        INIT_WORK(&efx->reset_work, efx_reset_work);
        INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
        efx->pci_dev = pci_dev;
        efx->state = STATE_INIT;
        efx->reset_pending = RESET_TYPE_NONE;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
        efx->board_info = efx_dummy_board_info;

        efx->net_dev = net_dev;
        efx->rx_checksum_enabled = true;
        spin_lock_init(&efx->netif_stop_lock);
        spin_lock_init(&efx->stats_lock);
        mutex_init(&efx->mac_lock);
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mii.dev = net_dev;
        INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
        atomic_set(&efx->netif_stop_count, 1);

        for (i = 0; i < EFX_MAX_CHANNELS; i++) {
                channel = &efx->channel[i];
                channel->efx = efx;
                channel->channel = i;
                channel->evqnum = i;
                channel->work_pending = false;
        }
        for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
                tx_queue = &efx->tx_queue[i];
                tx_queue->efx = efx;
                tx_queue->queue = i;
                tx_queue->buffer = NULL;
                tx_queue->channel = &efx->channel[0]; /* for safety */
                tx_queue->tso_headers_free = NULL;
        }
        for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
                rx_queue = &efx->rx_queue[i];
                rx_queue->efx = efx;
                rx_queue->queue = i;
                rx_queue->channel = &efx->channel[0]; /* for safety */
                rx_queue->buffer = NULL;
                spin_lock_init(&rx_queue->add_lock);
                INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
        }

        efx->type = type;

        /* Sanity-check NIC type */
        EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
                            (efx->type->txd_ring_mask + 1));
        EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
                            (efx->type->rxd_ring_mask + 1));
        EFX_BUG_ON_PARANOID(efx->type->evq_size &
                            (efx->type->evq_size - 1));
        /* As close as we can get to guaranteeing that we don't overflow */
        EFX_BUG_ON_PARANOID(efx->type->evq_size <
                            (efx->type->txd_ring_mask + 1 +
                             efx->type->rxd_ring_mask + 1));
        EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

        /* Higher numbered interrupt modes are less capable! */
        efx->interrupt_mode = max(efx->type->max_interrupt_mode,
                                  interrupt_mode);

        efx->workqueue = create_singlethread_workqueue("sfc_work");
        if (!efx->workqueue) {
                rc = -ENOMEM;
                goto fail1;
        }

        efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
        if (!efx->reset_workqueue) {
                rc = -ENOMEM;
                goto fail2;
        }

        return 0;

 fail2:
        destroy_workqueue(efx->workqueue);
        efx->workqueue = NULL;
 fail1:
        return rc;
}
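
/* Added commentary (not in the original file): the paranoid checks in
 * efx_init_struct() rely on two bit tricks. A ring *mask* must have
 * the form 2^n - 1 (all low bits set), which holds exactly when
 * mask & (mask + 1) == 0; e.g. 0x3ff & 0x400 == 0, but 0x3fe fails.
 * A ring *size* must be a power of two, which holds exactly when
 * size & (size - 1) == 0. The final size check ensures that one event
 * per TX and RX descriptor can never overflow the event queue.
 */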

static void efx_fini_struct(struct efx_nic *efx)
{
        if (efx->reset_workqueue) {
                destroy_workqueue(efx->reset_workqueue);
                efx->reset_workqueue = NULL;
        }
        if (efx->workqueue) {
                destroy_workqueue(efx->workqueue);
                efx->workqueue = NULL;
        }
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        /* Skip everything if we never obtained a valid membase */
        if (!efx->membase)
                return;

        efx_fini_channels(efx);
        efx_fini_port(efx);

        /* Shutdown the board, then the NIC and board state */
        efx->board_info.fini(efx);
        falcon_fini_interrupt(efx);

        efx_fini_napi(efx);
        efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx->state = STATE_FINI;
        dev_close(efx->net_dev);

        /* Allow any queued efx_resets() to complete */
        rtnl_unlock();

        if (efx->membase == NULL)
                goto out;

        efx_unregister_netdev(efx);

        /* Wait for any scheduled resets to complete. No more will be
         * scheduled from this point because efx_stop_all() has been
         * called, we are no longer registered with driverlink, and
         * the net_device's have been removed. */
        flush_workqueue(efx->reset_workqueue);

        efx_pci_remove_main(efx);

 out:
        efx_fini_io(efx);
        EFX_LOG(efx, "shutdown successful\n");

        pci_set_drvdata(pci_dev, NULL);
        efx_fini_struct(efx);
        free_netdev(efx->net_dev);
}

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        rc = efx_init_napi(efx);
        if (rc)
                goto fail2;

        /* Initialise the board */
        rc = efx->board_info.init(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise board\n");
                goto fail3;
        }

        rc = falcon_init_nic(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise NIC\n");
                goto fail4;
        }

        rc = efx_init_port(efx);
        if (rc) {
                EFX_ERR(efx, "failed to initialise port\n");
                goto fail4;
        }

        rc = efx_init_channels(efx);
        if (rc)
                goto fail5;

        rc = falcon_init_interrupt(efx);
        if (rc)
                goto fail6;

        return 0;

 fail6:
        efx_fini_channels(efx);
 fail5:
        efx_fini_port(efx);
 fail4:
        efx->board_info.fini(efx);
 fail3:
        efx_fini_napi(efx);
 fail2:
        efx_remove_all(efx);
 fail1:
        return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
{
        struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int i, rc;

        /* Allocate and initialise a struct net_device and struct efx_nic */
        net_dev = alloc_etherdev(sizeof(*efx));
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO);
        if (lro)
                net_dev->features |= NETIF_F_LRO;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_TSO);
        efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        rc = efx_init_struct(efx, type, pci_dev, net_dev);
        if (rc)
                goto fail1;

        EFX_INFO(efx, "Solarflare Communications NIC detected\n");

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx);
        if (rc)
                goto fail2;

        /* No serialisation is required with the reset path because
         * we're in STATE_INIT. */
        for (i = 0; i < 5; i++) {
                rc = efx_pci_probe_main(efx);
                if (rc == 0)
                        break;

                /* Serialise against efx_reset(). No more resets will be
                 * scheduled since efx_stop_all() has been called, and we
                 * have not and never have been registered with either
                 * the rtnetlink or driverlink layers. */
                flush_workqueue(efx->reset_workqueue);

                /* Retry if a recoverable reset event has been scheduled */
                if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
                    (efx->reset_pending != RESET_TYPE_ALL))
                        goto fail3;

                efx->reset_pending = RESET_TYPE_NONE;
        }

        if (rc) {
                EFX_ERR(efx, "Could not reset NIC\n");
                goto fail3;
        }

        /* Switch to the running state before we expose the device to
         * the OS. This is to ensure that the initial gathering of
         * MAC stats succeeds. */
        rtnl_lock();
        efx->state = STATE_RUNNING;
        rtnl_unlock();

        rc = efx_register_netdev(efx);
        if (rc)
                goto fail4;

        EFX_LOG(efx, "initialisation successful\n");

        return 0;

 fail4:
        efx_pci_remove_main(efx);
 fail3:
        efx_fini_io(efx);
 fail2:
        efx_fini_struct(efx);
 fail1:
        EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
}

static struct pci_driver efx_pci_driver = {
        .name           = EFX_DRIVER_NAME,
        .id_table       = efx_pci_table,
        .probe          = efx_pci_probe,
        .remove         = efx_pci_remove,
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

        refill_workqueue = create_workqueue("sfc_refill");
        if (!refill_workqueue) {
                rc = -ENOMEM;
                goto err_refill;
        }

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        return 0;

 err_pci:
        destroy_workqueue(refill_workqueue);
 err_refill:
        unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
        return rc;
}

static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&efx_pci_driver);
        destroy_workqueue(refill_workqueue);
        unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
              "Solarflare Communications");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);