/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/tcp.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include "net_driver.h"

#define EFX_MAX_MTU (9 * 1024)

/* RX slow fill workqueue. If memory allocation fails in the fast path,
 * a work item is pushed onto this work queue to retry the allocation later,
 * to avoid the NIC being starved of RX buffers. Since this is a per cpu
 * workqueue, there is nothing to be gained in making it per NIC.
 */
static struct workqueue_struct *refill_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
 *
 * This sets the default for new devices. It can be controlled later
 * using ethtool.
 */
static int lro = true;
module_param(lro, int, 0644);
MODULE_PARM_DESC(lro, "Large receive offload acceleration");

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us to
 * apply a higher level of interrupt moderation to TX events.
 *
 * This is forced to 0 for MSI interrupt mode as the interrupt vector
 * is not written.
 */
static unsigned int separate_tx_and_rx_channels = true;

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor, which checks for known hardware bugs and resets the
 * hardware and driver as necessary.
 */
unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
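
/* Worked example of the 205 usec figure above (illustrative; assumes a
 * 1024-entry TX ring and ~1.2 usec of wire time per worst-case packet
 * at 10G, as stated in the comment): the queue restarts once it drains
 * from full (1024 descriptors) to half full, i.e. 512 descriptors. At
 * 3 descriptors per packet that is 512 / 3 ~= 170 packets, and
 * 170 * 1.2 usec ~= 205 usec. The 150 usec default therefore keeps the
 * TX moderation interval comfortably below the queue drain time.
 */
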
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_port(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_channels(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if (efx->state == STATE_RUNNING)	\
			ASSERT_RTNL();			\
	} while (0)
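
/* Note: EFX_ASSERT_RESET_SERIALISED() encodes the serialisation rule for
 * reconfiguration paths: once the NIC has reached STATE_RUNNING, any
 * caller on such a path must hold the RTNL lock.
 */
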
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int rx_quota)
{
	struct efx_nic *efx = channel->efx;
	int rx_packets;

	if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
		     !channel->enabled))
		return 0;

	rx_packets = falcon_process_eventq(channel, rx_quota);
	if (rx_packets == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_flush_lro(channel);
	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);

	return rx_packets;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	falcon_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct net_device *napi_dev = channel->napi_dev;
	int rx_packets;

	EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
		  channel->channel, raw_smp_processor_id());

	rx_packets = efx_process_channel(channel, budget);

	if (rx_packets < budget) {
		/* There is no race here; although napi_disable() will
		 * only wait for netif_rx_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		netif_rx_complete(napi_dev, napi);
		efx_channel_processed(channel);
	}

	return rx_packets;
}
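
/* Note on the NAPI contract above: consuming strictly fewer events than
 * "budget" signals that the channel is idle, so the poll is completed
 * and the event queue is acknowledged, which re-arms the channel's
 * interrupt. Consuming the full budget leaves the NAPI instance
 * scheduled, and this handler is called again without a new interrupt.
 */
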
/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * Since we are touching interrupts the caller should hold the suspend lock
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(!channel->used_flags);
	BUG_ON(!channel->enabled);

	/* Disable interrupts and wait for ISRs to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, efx->type->evq_size);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	falcon_enable_interrupts(efx);
}
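
/* Note: passing efx->type->evq_size as the quota above allows a single
 * synchronous poll to drain every outstanding event before NAPI and
 * interrupts are re-enabled.
 */
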
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);

	return falcon_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	falcon_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);

	falcon_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);

	falcon_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len);

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}
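
/* Illustrative sizing for the calculation above (not taken from this
 * file; it assumes EFX_MAX_FRAME_LEN() adds the Ethernet header, VLAN
 * tag and FCS to the MTU): for a 1500-byte MTU the frame comes to
 * roughly 1522 bytes, so with the alignment and NIC-type RX padding the
 * buffer stays well under 4KB and get_order() selects order-0 pages.
 */
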
/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);

	if (!(channel->efx->net_dev->flags & IFF_UP))
		netif_napi_add(channel->napi_dev, &channel->napi_str,
			       efx_poll, napi_weight);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);

	/* Load up RX descriptors */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	if (!channel->enabled)
		return;

	EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);

	/* Ensure that any worker threads have exited or will be no-ops */
	efx_for_each_channel_rx_queue(rx_queue, channel) {
		spin_lock_bh(&rx_queue->add_lock);
		spin_unlock_bh(&rx_queue->add_lock);
	}
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = falcon_flush_queues(efx);
	if (rc)
		EFX_ERR(efx, "failed to flush queues\n");
	else
		EFX_LOG(efx, "successfully flushed all queues\n");

	efx_for_each_channel(channel, efx) {
		EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);

	channel->used_flags = 0;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
{
	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
static void efx_link_status_changed(struct efx_nic *efx)
{
	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (efx->port_inhibited) {
		netif_carrier_off(efx->net_dev);
		return;
	}

	if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (efx->link_up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (efx->link_up) {
		struct mii_if_info *gmii = &efx->mii;
		unsigned adv, lpa;

		/* NONE here means direct XAUI from the controller, with no
		 * MDIO-attached device we can query. */
		if (efx->phy_type != PHY_TYPE_NONE) {
			adv = gmii_advertised(gmii);
			lpa = gmii_lpa(gmii);
		} else {
			lpa = GM_LPA_10000 | LPA_DUPLEX;
			adv = lpa;
		}
		EFX_INFO(efx, "link up at %dMbps %s-duplex "
			 "(adv %04x lpa %04x) (MTU %d)%s\n",
			 (efx->link_options & GM_LPA_10000 ? 10000 :
			  (efx->link_options & GM_LPA_1000 ? 1000 :
			   (efx->link_options & GM_LPA_100 ? 100 : 10))),
			 (efx->link_options & GM_LPA_DUPLEX ?
			  "full" : "half"),
			 adv, lpa, efx->net_dev->mtu,
			 (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		EFX_INFO(efx, "link down\n");
	}
}

/* This call reinitialises the MAC to pick up new PHY settings. The
 * caller must hold the mac_lock */
void __efx_reconfigure_port(struct efx_nic *efx)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
		raw_smp_processor_id());

	/* Serialise the promiscuous flag with efx_set_multicast_list. */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}

	falcon_reconfigure_xmac(efx);

	/* Inform kernel of loss/gain of carrier */
	efx_link_status_changed(efx);
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
void efx_reconfigure_port(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
static void efx_reconfigure_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   reconfigure_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "create port\n");

	/* Connect up MAC/PHY operations table and read MAC address */
	rc = falcon_probe_port(efx);
	if (rc)
		goto err;

	/* Sanity check MAC address */
	if (is_valid_ether_addr(efx->mac_address)) {
		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
	} else {
		EFX_ERR(efx, "invalid MAC address %pM\n",
			efx->mac_address);
		if (!allow_bad_hwaddr) {
			rc = -EINVAL;
			goto err;
		}
		random_ether_addr(efx->net_dev->dev_addr);
		EFX_INFO(efx, "using locally-generated MAC %pM\n",
			 efx->net_dev->dev_addr);
	}

	return 0;

 err:
	efx_remove_port(efx);
	return rc;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "init port\n");

	/* Initialise the MAC and PHY */
	rc = falcon_init_xmac(efx);
	if (rc)
		return rc;

	efx->port_initialized = true;
	efx->stats_enabled = true;

	/* Reconfigure port to program MAC registers */
	falcon_reconfigure_xmac(efx);

	return 0;
}

/* Allow efx_reconfigure_port() to be scheduled, and close the window
 * between efx_stop_port and efx_flush_all whereby a previously scheduled
 * efx_reconfigure_port() may have been cancelled */
static void efx_start_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;
	__efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);
}

/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
 * efx_reconfigure_work can still be scheduled via NAPI processing
 * until efx_flush_all() is called */
static void efx_stop_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "stop port\n");

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	if (efx_dev_registered(efx)) {
		netif_addr_lock_bh(efx->net_dev);
		netif_addr_unlock_bh(efx->net_dev);
	}
}

static void efx_fini_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "shut down port\n");

	if (!efx->port_initialized)
		return;

	falcon_fini_xmac(efx);
	efx->port_initialized = false;

	efx->link_up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying port\n");

	falcon_remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	int rc;

	EFX_LOG(efx, "initialising I/O\n");

	rc = pci_enable_device(pci_dev);
	if (rc) {
		EFX_ERR(efx, "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		if (pci_dma_supported(pci_dev, dma_mask) &&
		    ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		EFX_ERR(efx, "could not find a suitable DMA mask\n");
		goto fail2;
	}
	EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
	if (rc) {
		/* pci_set_consistent_dma_mask() is not *allowed* to
		 * fail with a mask that pci_set_dma_mask() accepted,
		 * but just in case...
		 */
		EFX_ERR(efx, "failed to set consistent DMA mask\n");
		goto fail2;
	}

	efx->membase_phys = pci_resource_start(efx->pci_dev,
					       efx->type->mem_bar);
	rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
	if (rc) {
		EFX_ERR(efx, "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
	if (!efx->membase) {
		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
			efx->type->mem_bar,
			(unsigned long long)efx->membase_phys,
			efx->type->mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
		efx->type->mem_map_size, efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, efx->type->mem_bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	EFX_LOG(efx, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		pci_release_region(efx->pci_dev, efx->type->mem_bar);
		efx->membase_phys = 0;
	}

	pci_disable_device(efx->pci_dev);
}

/* Get number of RX queues wanted. Return number of online CPU
 * packages in the expectation that an IRQ balancer will spread
 * interrupts across them. */
static int efx_wanted_rx_queues(void)
{
	cpumask_t core_mask;
	int count;
	int cpu;

	cpus_clear(core_mask);
	count = 0;
	for_each_online_cpu(cpu) {
		if (!cpu_isset(cpu, core_mask)) {
			++count;
			cpus_or(core_mask, core_mask,
				topology_core_siblings(cpu));
		}
	}

	return count;
}
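
/* Note: each online CPU not yet covered adds one to the count and then
 * merges its whole package (core sibling mask) into core_mask, so every
 * CPU package is counted exactly once regardless of its core count.
 */
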
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static void efx_probe_interrupts(struct efx_nic *efx)
{
	int max_channels =
		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
	int rc, i;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		int wanted_ints;

		/* We want one RX queue and interrupt per CPU package
		 * (or as specified by the rss_cpus module parameter).
		 * We will need one channel per interrupt.
		 */
		wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
		efx->n_rx_queues = min(wanted_ints, max_channels);

		for (i = 0; i < efx->n_rx_queues; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
		if (rc > 0) {
			EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
			efx->n_rx_queues = rc;
			rc = pci_enable_msix(efx->pci_dev, xentries,
					     efx->n_rx_queues);
		}

		if (rc == 0) {
			for (i = 0; i < efx->n_rx_queues; i++)
				efx->channel[i].irq = xentries[i].vector;
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
			EFX_ERR(efx, "could not enable MSI-X\n");
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_rx_queues = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_rx_queues = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
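
/* Note the fall-through ladder above: a failed MSI-X probe downgrades
 * interrupt_mode to MSI, and a failed MSI probe downgrades it to
 * legacy, so exactly one of the three branches ends up configuring the
 * interrupts. A positive return from pci_enable_msix() reports how many
 * vectors are actually available, and the probe retries with that
 * reduced count.
 */
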
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_tx_queue(tx_queue, efx) {
		if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
			tx_queue->channel = &efx->channel[1];
		else
			tx_queue->channel = &efx->channel[0];
		tx_queue->channel->used_flags |= EFX_USED_BY_TX;
	}

	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->channel = &efx->channel[rx_queue->queue];
		rx_queue->channel->used_flags |= EFX_USED_BY_RX;
	}
}
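
/* Note: TX queues either all share channel 1 (when separate TX/RX
 * channels are in use) or channel 0, while each RX queue is always
 * bound to the channel with the same index as the queue; this is why
 * the MSI-X probe sizes the channel count from the RX queue count.
 */
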
static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	EFX_LOG(efx, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = falcon_probe_nic(efx);
	if (rc)
		return rc;

	/* Determine the number of channels and RX queues by trying to hook
	 * in MSI-X interrupts. */
	efx_probe_interrupts(efx);

	efx_set_channels(efx);

	/* Initialise the interrupt moderation settings */
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);

	return 0;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	EFX_LOG(efx, "destroying NIC\n");

	efx_remove_interrupts(efx);
	falcon_remove_nic(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Create NIC */
	rc = efx_probe_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create NIC\n");
		goto fail1;
	}

	/* Create port */
	rc = efx_probe_port(efx);
	if (rc) {
		EFX_ERR(efx, "failed to create port\n");
		goto fail2;
	}

	/* Create channels */
	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			EFX_ERR(efx, "failed to create channel %d\n",
				channel->channel);
			goto fail3;
		}
	}

	return 0;

 fail3:
	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}

/* Called after previous invocation(s) of efx_stop_all, restarts the
 * port, kernel transmit queue, NAPI processing and hardware interrupts,
 * and ensures that the port is scheduled to be reconfigured.
 * This function is safe to call multiple times when the NIC is in any
 * state. */
static void efx_start_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled)
		return;
	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
		return;
	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
		return;

	/* Mark the port as enabled so port reconfigurations can start, then
	 * restart the transmit interface early so the watchdog timer stops */
	efx_start_port(efx);
	if (efx_dev_registered(efx))
		efx_wake_queue(efx);

	efx_for_each_channel(channel, efx)
		efx_start_channel(channel);

	falcon_enable_interrupts(efx);

	/* Start hardware monitor if we're in RUNNING */
	if (efx->state == STATE_RUNNING)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
}

/* Flush all delayed work. Should only be called when no more delayed work
 * will be scheduled. This doesn't flush pending online resets (efx_reset),
 * since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;

	/* Make sure the hardware monitor is stopped */
	cancel_delayed_work_sync(&efx->monitor_work);

	/* Ensure that all RX slow refills are complete. */
	efx_for_each_rx_queue(rx_queue, efx)
		cancel_delayed_work_sync(&rx_queue->work);

	/* Stop scheduled port reconfigurations */
	cancel_work_sync(&efx->reconfigure_work);
}

/* Quiesce hardware and software without bringing the link down.
 * Safe to call multiple times, when the nic and interface is in any
 * state. The caller is guaranteed to subsequently be in a position
 * to modify any hardware and software state they see fit without
 * taking locks. */
static void efx_stop_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* Disable interrupts and wait for ISR to complete */
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}

	/* Stop all NAPI processing and synchronous rx refills */
	efx_for_each_channel(channel, efx)
		efx_stop_channel(channel);

	/* Stop all asynchronous port reconfigurations. Since all
	 * event processing has already been stopped, there is no
	 * window to lose phy events */
	efx_stop_port(efx);

	/* Flush reconfigure_work, refill_workqueue, monitor_work */
	efx_flush_all(efx);

	/* Isolate the MAC from the TX and RX engines, so that queue
	 * flushes will complete in a timely fashion. */
	falcon_drain_tx_fifo(efx);

	/* Stop the kernel transmit interface late, so the watchdog
	 * timer isn't ticking over the flush */
	if (efx_dev_registered(efx)) {
		efx_stop_queue(efx);
		netif_tx_lock_bh(efx->net_dev);
		netif_tx_unlock_bh(efx->net_dev);
	}
}

static void efx_remove_all(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/* A convenience function to safely flush all the queues */
void efx_flush_queues(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);

	efx_fini_channels(efx);
	efx_init_channels(efx);

	efx_start_all(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

/* Set interrupt moderation parameters */
void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_for_each_tx_queue(tx_queue, efx)
		tx_queue->channel->irq_moderation = tx_usecs;

	efx_for_each_rx_queue(rx_queue, efx)
		rx_queue->channel->irq_moderation = rx_usecs;
}

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue. Serialised against
 * efx_reconfigure_port via the mac_lock */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);
	int rc;

	EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
		  raw_smp_processor_id());

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of check_hw() anyway. */
	if (!mutex_trylock(&efx->mac_lock)) {
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);
		return;
	}

	if (efx->port_enabled)
		rc = falcon_check_xmac(efx);
	mutex_unlock(&efx->mac_lock);

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ASSERT_RESET_SERIALISED(efx);

	return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx_for_each_channel(channel, efx) {
		channel->napi_dev = efx->net_dev;
		rc = efx_lro_init(&channel->lro_mgr, efx);
		if (rc)
			goto err;
	}
	return 0;

 err:
	efx_fini_napi(efx);
	return rc;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_lro_fini(&channel->lro_mgr);
		channel->napi_dev = NULL;
	}
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	EFX_ASSERT_RESET_SERIALISED(efx);

	EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;

	efx_start_all(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
		raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);
	efx_fini_channels(efx);
	efx_init_channels(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_mac_stats *mac_stats = &efx->mac_stats;
	struct net_device_stats *stats = &net_dev->stats;

	/* Update stats if possible, but do not wait if another thread
	 * is updating them (or resetting the NIC); slightly stale
	 * stats are acceptable.
	 */
	if (!spin_trylock(&efx->stats_lock))
		return stats;
	if (efx->stats_enabled) {
		falcon_update_stats_xmac(efx);
		falcon_update_nic_stats(efx);
	}
	spin_unlock(&efx->stats_lock);

	stats->rx_packets = mac_stats->rx_packets;
	stats->tx_packets = mac_stats->tx_packets;
	stats->rx_bytes = mac_stats->rx_bytes;
	stats->tx_bytes = mac_stats->tx_bytes;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_collision;
	stats->rx_length_errors = (mac_stats->rx_gtjumbo +
				   mac_stats->rx_length_error);
	stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
	stats->rx_crc_errors = mac_stats->rx_bad;
	stats->rx_frame_errors = mac_stats->rx_align_error;
	stats->rx_fifo_errors = mac_stats->rx_overflow;
	stats->rx_missed_errors = mac_stats->rx_missed;
	stats->tx_window_errors = mac_stats->tx_late_collision;

	stats->rx_errors = (stats->rx_length_errors +
			    stats->rx_over_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors +
			    stats->rx_missed_errors +
			    mac_stats->rx_symbol_error);
	stats->tx_errors = (stats->tx_window_errors +
			    mac_stats->tx_bad);

	return stats;
}
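
/* Note: the spin_trylock() above deliberately returns the previous
 * snapshot when the stats are busy (another reader, or a reset in
 * progress); slightly stale counters are preferred over blocking a
 * non-sleeping caller on the stats lock.
 */
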
/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
		" resetting channels\n",
		atomic_read(&efx->netif_stop_count), efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc = 0;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	efx_stop_all(efx);

	EFX_LOG(efx, "changing MTU to %d\n", new_mtu);

	efx_fini_channels(efx);
	net_dev->mtu = new_mtu;
	efx_init_channels(efx);

	efx_start_all(efx);
	return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	char *new_addr = addr->sa_data;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!is_valid_ether_addr(new_addr)) {
		EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
			new_addr);
		return -EINVAL;
	}

	memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

	/* Reconfigure the MAC */
	efx_reconfigure_port(efx);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct dev_mc_list *mc_list = net_dev->mc_list;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
	bool changed = (efx->promiscuous != promiscuous);
	u32 crc;
	int bit;
	int i;

	efx->promiscuous = promiscuous;

	/* Build multicast hash table */
	if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		for (i = 0; i < net_dev->mc_count; i++) {
			crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			set_bit_le(bit, mc_hash->byte);
			mc_list = mc_list->next;
		}
	}

	if (!efx->port_enabled)
		/* Delay pushing settings until efx_start_port() */
		return;

	if (changed)
		queue_work(efx->workqueue, &efx->reconfigure_work);

	/* Create and activate new global multicast hash table */
	falcon_set_multicast_hash(efx);
}
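
/* Hash sketch for the filter built above (illustrative; assumes
 * EFX_MCAST_HASH_ENTRIES is a power of two, e.g. 256): each multicast
 * address is reduced to a bucket index by
 *
 *	bit = ether_crc_le(ETH_ALEN, addr) & (EFX_MCAST_HASH_ENTRIES - 1);
 *
 * so the NIC accepts any frame whose destination hashes to a set bit;
 * the filter is approximate rather than exact.
 */
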
static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats		= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_multicast_list	= efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
};

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	if (net_dev->netdev_ops == &efx_netdev_ops &&
	    event == NETDEV_CHANGENAME) {
		struct efx_nic *efx = netdev_priv(net_dev);

		strcpy(efx->name, net_dev->name);
		efx_mtd_rename(efx);
	}

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(efx->net_dev);

	/* Clear MAC statistics */
	falcon_update_stats_xmac(efx);
	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

	rc = register_netdev(net_dev);
	if (rc) {
		EFX_ERR(efx, "could not register net dev\n");
		return rc;
	}
	strcpy(efx->name, net_dev->name);

	return 0;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_release_tx_buffers(tx_queue);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	/* The net_dev->get_stats handler is quite slow, and will fail
	 * if a fetch is pending over reset. Serialise against it. */
	spin_lock(&efx->stats_lock);
	efx->stats_enabled = false;
	spin_unlock(&efx->stats_lock);

	efx_stop_all(efx);
	mutex_lock(&efx->mac_lock);
	mutex_lock(&efx->spi_lock);

	rc = falcon_xmac_get_settings(efx, ecmd);
	if (rc)
		EFX_ERR(efx, "could not back up PHY settings\n");

	efx_fini_channels(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	rc = falcon_init_nic(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise NIC\n");
		ok = false;
	}

	if (ok) {
		efx_init_channels(efx);

		if (falcon_xmac_set_settings(efx, ecmd))
			EFX_ERR(efx, "could not restore PHY settings\n");
	}

	mutex_unlock(&efx->spi_lock);
	mutex_unlock(&efx->mac_lock);

	if (ok) {
		efx_start_all(efx);
		efx->stats_enabled = true;
	}
	return rc;
}

/* Reset the NIC as transparently as possible. Do not reset the PHY.
 * Note that the reset may fail, in which case the card will be left
 * in a most-probably-unusable state.
 *
 * This function will sleep. You cannot reset from within an atomic
 * state; use efx_schedule_reset() instead.
 *
 * Grabs the rtnl_lock.
 */
static int efx_reset(struct efx_nic *efx)
{
	struct ethtool_cmd ecmd;
	enum reset_type method = efx->reset_pending;
	int rc;

	/* Serialise with kernel interfaces */
	rtnl_lock();

	/* If we're not RUNNING then don't reset. Leave the reset_pending
	 * flag set so that efx_pci_probe_main will be retried */
	if (efx->state != STATE_RUNNING) {
		EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
		goto unlock_rtnl;
	}

	EFX_INFO(efx, "resetting (%d)\n", method);

	efx_reset_down(efx, &ecmd);

	rc = falcon_reset_hw(efx, method);
	if (rc) {
		EFX_ERR(efx, "failed to reset hardware\n");
		goto fail;
	}

	/* Allow resets to be rescheduled. */
	efx->reset_pending = RESET_TYPE_NONE;

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

	/* Leave device stopped if necessary */
	if (method == RESET_TYPE_DISABLE) {
		rc = -EIO;
		goto fail;
	}

	rc = efx_reset_up(efx, &ecmd, true);
	if (rc)
		goto disable;

	EFX_LOG(efx, "reset complete\n");
 unlock_rtnl:
	rtnl_unlock();
	return 0;

 fail:
	efx_reset_up(efx, &ecmd, false);
 disable:
	EFX_ERR(efx, "has been disabled\n");
	efx->state = STATE_DISABLED;

	rtnl_unlock();
	efx_unregister_netdev(efx);
	efx_fini_port(efx);
	return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);

	efx_reset(nic);
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->reset_pending != RESET_TYPE_NONE) {
		EFX_INFO(efx, "quenching already scheduled reset\n");
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
		method = type;
		break;
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		method = RESET_TYPE_INVISIBLE;
		break;
	default:
		method = RESET_TYPE_ALL;
		break;
	}

	if (method != type)
		EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
	else
		EFX_LOG(efx, "scheduling reset (%d)\n", method);

	efx->reset_pending = method;

	queue_work(efx->reset_workqueue, &efx->reset_work);
}
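
/* Note on efx_schedule_reset(): the user-visible reset types map to
 * themselves, the hardware-recovery types (RX_RECOVERY and the RX/TX
 * descriptor-fetch and TX_SKIP errors) are downgraded to the cheaper
 * RESET_TYPE_INVISIBLE, and anything unrecognised falls back to
 * RESET_TYPE_ALL.
 */
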
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static struct pci_device_id efx_pci_table[] __devinitdata = {
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
	 .driver_data = (unsigned long) &falcon_a_nic_type},
	{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
	 .driver_data = (unsigned long) &falcon_b_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC/Board operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}

static struct efx_phy_operations efx_dummy_phy_operations = {
	.init		 = efx_port_dummy_op_int,
	.reconfigure	 = efx_port_dummy_op_void,
	.check_hw	 = efx_port_dummy_op_int,
	.fini		 = efx_port_dummy_op_void,
	.clear_interrupt = efx_port_dummy_op_void,
};

static struct efx_board efx_dummy_board_info = {
	.init		= efx_port_dummy_op_int,
	.init_leds	= efx_port_dummy_op_int,
	.set_fault_led	= efx_port_dummy_op_blink,
	.monitor	= efx_port_dummy_op_int,
	.blink		= efx_port_dummy_op_blink,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i, rc;

	/* Initialise common structures */
	memset(efx, 0, sizeof(*efx));
	spin_lock_init(&efx->biu_lock);
	spin_lock_init(&efx->phy_lock);
	mutex_init(&efx->spi_lock);
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	efx->pci_dev = pci_dev;
	efx->state = STATE_INIT;
	efx->reset_pending = RESET_TYPE_NONE;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
	efx->board_info = efx_dummy_board_info;

	efx->net_dev = net_dev;
	efx->rx_checksum_enabled = true;
	spin_lock_init(&efx->netif_stop_lock);
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mii.dev = net_dev;
	INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
	atomic_set(&efx->netif_stop_count, 1);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		channel = &efx->channel[i];
		channel->efx = efx;
		channel->channel = i;
		channel->work_pending = false;
	}
	for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
		tx_queue = &efx->tx_queue[i];
		tx_queue->efx = efx;
		tx_queue->queue = i;
		tx_queue->buffer = NULL;
		tx_queue->channel = &efx->channel[0]; /* for safety */
		tx_queue->tso_headers_free = NULL;
	}
	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
		rx_queue = &efx->rx_queue[i];
		rx_queue->efx = efx;
		rx_queue->queue = i;
		rx_queue->channel = &efx->channel[0]; /* for safety */
		rx_queue->buffer = NULL;
		spin_lock_init(&rx_queue->add_lock);
		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
	}

	efx->type = type;

	/* Sanity-check NIC type */
	EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
			    (efx->type->txd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
			    (efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->evq_size &
			    (efx->type->evq_size - 1));
	/* As close as we can get to guaranteeing that we don't overflow */
	EFX_BUG_ON_PARANOID(efx->type->evq_size <
			    (efx->type->txd_ring_mask + 1 +
			     efx->type->rxd_ring_mask + 1));
	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	efx->workqueue = create_singlethread_workqueue("sfc_work");
	if (!efx->workqueue) {
		rc = -ENOMEM;
		goto fail1;
	}

	efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!efx->reset_workqueue) {
		rc = -ENOMEM;
		goto fail2;
	}

	return 0;

 fail2:
	destroy_workqueue(efx->workqueue);
	efx->workqueue = NULL;

 fail1:
	return rc;
}
*efx
)
1858 if (efx
->reset_workqueue
) {
1859 destroy_workqueue(efx
->reset_workqueue
);
1860 efx
->reset_workqueue
= NULL
;
1862 if (efx
->workqueue
) {
1863 destroy_workqueue(efx
->workqueue
);
1864 efx
->workqueue
= NULL
;
1868 /**************************************************************************
1872 **************************************************************************/
1874 /* Main body of final NIC shutdown code
1875 * This is called only at module unload (or hotplug removal).
1877 static void efx_pci_remove_main(struct efx_nic
*efx
)
1879 EFX_ASSERT_RESET_SERIALISED(efx
);
1881 /* Skip everything if we never obtained a valid membase */
1885 efx_fini_channels(efx
);
1888 /* Shutdown the board, then the NIC and board state */
1889 efx
->board_info
.fini(efx
);
1890 falcon_fini_interrupt(efx
);
1893 efx_remove_all(efx
);
1896 /* Final NIC shutdown
1897 * This is called only at module unload (or hotplug removal).
1899 static void efx_pci_remove(struct pci_dev
*pci_dev
)
1901 struct efx_nic
*efx
;
1903 efx
= pci_get_drvdata(pci_dev
);
1907 efx_mtd_remove(efx
);
1909 /* Mark the NIC as fini, then stop the interface */
1911 efx
->state
= STATE_FINI
;
1912 dev_close(efx
->net_dev
);
1914 /* Allow any queued efx_resets() to complete */
1917 if (efx
->membase
== NULL
)
1920 efx_unregister_netdev(efx
);
1922 /* Wait for any scheduled resets to complete. No more will be
1923 * scheduled from this point because efx_stop_all() has been
1924 * called, we are no longer registered with driverlink, and
1925 * the net_device's have been removed. */
1926 flush_workqueue(efx
->reset_workqueue
);
1928 efx_pci_remove_main(efx
);
1932 EFX_LOG(efx
, "shutdown successful\n");
1934 pci_set_drvdata(pci_dev
, NULL
);
1935 efx_fini_struct(efx
);
1936 free_netdev(efx
->net_dev
);
1939 /* Main body of NIC initialisation
1940 * This is called at module load (or hotplug insertion, theoretically).
1942 static int efx_pci_probe_main(struct efx_nic
*efx
)
1946 /* Do start-of-day initialisation */
1947 rc
= efx_probe_all(efx
);
1951 rc
= efx_init_napi(efx
);
1955 /* Initialise the board */
1956 rc
= efx
->board_info
.init(efx
);
1958 EFX_ERR(efx
, "failed to initialise board\n");
1962 rc
= falcon_init_nic(efx
);
1964 EFX_ERR(efx
, "failed to initialise NIC\n");
1968 rc
= efx_init_port(efx
);
1970 EFX_ERR(efx
, "failed to initialise port\n");
1974 efx_init_channels(efx
);
1976 rc
= falcon_init_interrupt(efx
);
1983 efx_fini_channels(efx
);
1987 efx
->board_info
.fini(efx
);
1991 efx_remove_all(efx
);
1996 /* NIC initialisation
1998 * This is called at module load (or hotplug insertion,
1999 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2000 * sets up and registers the network devices with the kernel and hooks
2001 * the interrupt service routine. It does not prepare the device for
2002 * transmission; this is left to the first time one of the network
2003 * interfaces is brought up (i.e. efx_net_open).
2005 static int __devinit
efx_pci_probe(struct pci_dev
*pci_dev
,
2006 const struct pci_device_id
*entry
)
2008 struct efx_nic_type
*type
= (struct efx_nic_type
*) entry
->driver_data
;
2009 struct net_device
*net_dev
;
2010 struct efx_nic
*efx
;
2013 /* Allocate and initialise a struct net_device and struct efx_nic */
2014 net_dev
= alloc_etherdev(sizeof(*efx
));
2017 net_dev
->features
|= (NETIF_F_IP_CSUM
| NETIF_F_SG
|
2018 NETIF_F_HIGHDMA
| NETIF_F_TSO
);
2020 net_dev
->features
|= NETIF_F_LRO
;
2021 /* Mask for features that also apply to VLAN devices */
2022 net_dev
->vlan_features
|= (NETIF_F_ALL_CSUM
| NETIF_F_SG
|
2023 NETIF_F_HIGHDMA
| NETIF_F_TSO
);
2024 efx
= netdev_priv(net_dev
);
2025 pci_set_drvdata(pci_dev
, efx
);
2026 rc
= efx_init_struct(efx
, type
, pci_dev
, net_dev
);
2030 EFX_INFO(efx
, "Solarflare Communications NIC detected\n");
2032 /* Set up basic I/O (BAR mappings etc) */
2033 rc
= efx_init_io(efx
);
2037 /* No serialisation is required with the reset path because
2038 * we're in STATE_INIT. */
2039 for (i
= 0; i
< 5; i
++) {
2040 rc
= efx_pci_probe_main(efx
);
2044 /* Serialise against efx_reset(). No more resets will be
2045 * scheduled since efx_stop_all() has been called, and we
2046 * have not and never have been registered with either
2047 * the rtnetlink or driverlink layers. */
2048 flush_workqueue(efx
->reset_workqueue
);
2050 /* Retry if a recoverably reset event has been scheduled */
2051 if ((efx
->reset_pending
!= RESET_TYPE_INVISIBLE
) &&
2052 (efx
->reset_pending
!= RESET_TYPE_ALL
))
2055 efx
->reset_pending
= RESET_TYPE_NONE
;
2059 EFX_ERR(efx
, "Could not reset NIC\n");
2063 /* Switch to the running state before we expose the device to
2064 * the OS. This is to ensure that the initial gathering of
2065 * MAC stats succeeds. */
2067 efx
->state
= STATE_RUNNING
;
2070 rc
= efx_register_netdev(efx
);
2074 EFX_LOG(efx
, "initialisation successful\n");
2076 efx_mtd_probe(efx
); /* allowed to fail */
2080 efx_pci_remove_main(efx
);
2085 efx_fini_struct(efx
);
2087 EFX_LOG(efx
, "initialisation failed. rc=%d\n", rc
);
2088 free_netdev(net_dev
);
2092 static struct pci_driver efx_pci_driver
= {
2093 .name
= EFX_DRIVER_NAME
,
2094 .id_table
= efx_pci_table
,
2095 .probe
= efx_pci_probe
,
2096 .remove
= efx_pci_remove
,
2099 /**************************************************************************
2101 * Kernel module interface
2103 *************************************************************************/
2105 module_param(interrupt_mode
, uint
, 0444);
2106 MODULE_PARM_DESC(interrupt_mode
,
2107 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2109 static int __init
efx_init_module(void)
2113 printk(KERN_INFO
"Solarflare NET driver v" EFX_DRIVER_VERSION
"\n");
2115 rc
= register_netdevice_notifier(&efx_netdev_notifier
);
2119 refill_workqueue
= create_workqueue("sfc_refill");
2120 if (!refill_workqueue
) {
2125 rc
= pci_register_driver(&efx_pci_driver
);
2132 destroy_workqueue(refill_workqueue
);
2134 unregister_netdevice_notifier(&efx_netdev_notifier
);
2139 static void __exit
efx_exit_module(void)
2141 printk(KERN_INFO
"Solarflare NET driver unloading\n");
2143 pci_unregister_driver(&efx_pci_driver
);
2144 destroy_workqueue(refill_workqueue
);
2145 unregister_netdevice_notifier(&efx_netdev_notifier
);
2149 module_init(efx_init_module
);
2150 module_exit(efx_exit_module
);
2152 MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2153 "Solarflare Communications");
2154 MODULE_DESCRIPTION("Solarflare Communications network driver");
2155 MODULE_LICENSE("GPL");
2156 MODULE_DEVICE_TABLE(pci
, efx_pci_table
);