1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <linux/seq_file.h>
16 #include <linux/i2c.h>
17 #include <linux/i2c-algo-bit.h>
18 #include "net_driver.h"
25 #include "falcon_hwdefs.h"
26 #include "falcon_io.h"
30 #include "workarounds.h"
32 /* Falcon hardware control.
33 * Falcon is the internal codename for the SFC4000 controller that is
34 * present in SFE400X evaluation boards
38 * struct falcon_nic_data - Falcon NIC state
39 * @next_buffer_table: First available buffer table id
40 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm
43 struct falcon_nic_data
{
44 unsigned next_buffer_table
;
45 struct pci_dev
*pci_dev2
;
46 struct i2c_algo_bit_data i2c_data
;
49 /**************************************************************************
53 **************************************************************************
56 static int disable_dma_stats
;
58 /* This is set to 16 for a good reason. In summary, if larger than
59 * 16, the descriptor cache holds more than a default socket
60 * buffer's worth of packets (for UDP we can only have at most one
61 * socket buffer's worth outstanding). This combined with the fact
62 * that we only get 1 TX event per descriptor cache means the NIC
65 #define TX_DC_ENTRIES 16
66 #define TX_DC_ENTRIES_ORDER 0
67 #define TX_DC_BASE 0x130000
69 #define RX_DC_ENTRIES 64
70 #define RX_DC_ENTRIES_ORDER 2
71 #define RX_DC_BASE 0x100000
73 /* RX FIFO XOFF watermark
75 * When the amount of the RX FIFO increases used increases past this
76 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
77 * This also has an effect on RX/TX arbitration
79 static int rx_xoff_thresh_bytes
= -1;
80 module_param(rx_xoff_thresh_bytes
, int, 0644);
81 MODULE_PARM_DESC(rx_xoff_thresh_bytes
, "RX fifo XOFF threshold");
83 /* RX FIFO XON watermark
85 * When the amount of the RX FIFO used decreases below this
86 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
87 * This also has an effect on RX/TX arbitration
89 static int rx_xon_thresh_bytes
= -1;
90 module_param(rx_xon_thresh_bytes
, int, 0644);
91 MODULE_PARM_DESC(rx_xon_thresh_bytes
, "RX fifo XON threshold");
93 /* TX descriptor ring size - min 512 max 4k */
94 #define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
95 #define FALCON_TXD_RING_SIZE 1024
96 #define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
98 /* RX descriptor ring size - min 512 max 4k */
99 #define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
100 #define FALCON_RXD_RING_SIZE 1024
101 #define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
103 /* Event queue size - max 32k */
104 #define FALCON_EVQ_ORDER EVQ_SIZE_4K
105 #define FALCON_EVQ_SIZE 4096
106 #define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
108 /* Max number of internal errors. After this resets will not be performed */
109 #define FALCON_MAX_INT_ERRORS 4
111 /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
113 #define FALCON_FLUSH_INTERVAL 10
114 #define FALCON_FLUSH_POLL_COUNT 100
116 /**************************************************************************
120 **************************************************************************
123 /* DMA address mask */
124 #define FALCON_DMA_MASK DMA_BIT_MASK(46)
126 /* TX DMA length mask (13-bit) */
127 #define FALCON_TX_DMA_MASK (4096 - 1)
129 /* Size and alignment of special buffers (4KB) */
130 #define FALCON_BUF_SIZE 4096
132 /* Dummy SRAM size code */
133 #define SRM_NB_BSZ_ONCHIP_ONLY (-1)
135 /* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
136 #define PCI_EXP_DEVCAP_PWR_VAL_LBN 18
137 #define PCI_EXP_DEVCAP_PWR_SCL_LBN 26
138 #define PCI_EXP_DEVCTL_PAYLOAD_LBN 5
139 #define PCI_EXP_LNKSTA_LNK_WID 0x3f0
140 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4
142 #define FALCON_IS_DUAL_FUNC(efx) \
143 (falcon_rev(efx) < FALCON_REV_B0)
145 /**************************************************************************
147 * Falcon hardware access
149 **************************************************************************/
151 /* Read the current event from the event queue */
152 static inline efx_qword_t
*falcon_event(struct efx_channel
*channel
,
155 return (((efx_qword_t
*) (channel
->eventq
.addr
)) + index
);
158 /* See if an event is present
160 * We check both the high and low dword of the event for all ones. We
161 * wrote all ones when we cleared the event, and no valid event can
162 * have all ones in either its high or low dwords. This approach is
163 * robust against reordering.
165 * Note that using a single 64-bit comparison is incorrect; even
166 * though the CPU read will be atomic, the DMA write may not be.
168 static inline int falcon_event_present(efx_qword_t
*event
)
170 return (!(EFX_DWORD_IS_ALL_ONES(event
->dword
[0]) |
171 EFX_DWORD_IS_ALL_ONES(event
->dword
[1])));
174 /**************************************************************************
176 * I2C bus - this is a bit-bashing interface using GPIO pins
177 * Note that it uses the output enables to tristate the outputs
178 * SDA is the data pin and SCL is the clock
180 **************************************************************************
182 static void falcon_setsda(void *data
, int state
)
184 struct efx_nic
*efx
= (struct efx_nic
*)data
;
187 falcon_read(efx
, ®
, GPIO_CTL_REG_KER
);
188 EFX_SET_OWORD_FIELD(reg
, GPIO3_OEN
, !state
);
189 falcon_write(efx
, ®
, GPIO_CTL_REG_KER
);
192 static void falcon_setscl(void *data
, int state
)
194 struct efx_nic
*efx
= (struct efx_nic
*)data
;
197 falcon_read(efx
, ®
, GPIO_CTL_REG_KER
);
198 EFX_SET_OWORD_FIELD(reg
, GPIO0_OEN
, !state
);
199 falcon_write(efx
, ®
, GPIO_CTL_REG_KER
);
202 static int falcon_getsda(void *data
)
204 struct efx_nic
*efx
= (struct efx_nic
*)data
;
207 falcon_read(efx
, ®
, GPIO_CTL_REG_KER
);
208 return EFX_OWORD_FIELD(reg
, GPIO3_IN
);
211 static int falcon_getscl(void *data
)
213 struct efx_nic
*efx
= (struct efx_nic
*)data
;
216 falcon_read(efx
, ®
, GPIO_CTL_REG_KER
);
217 return EFX_OWORD_FIELD(reg
, GPIO0_IN
);
220 static struct i2c_algo_bit_data falcon_i2c_bit_operations
= {
221 .setsda
= falcon_setsda
,
222 .setscl
= falcon_setscl
,
223 .getsda
= falcon_getsda
,
224 .getscl
= falcon_getscl
,
226 /* Wait up to 50 ms for slave to let us pull SCL high */
227 .timeout
= DIV_ROUND_UP(HZ
, 20),
230 /**************************************************************************
232 * Falcon special buffer handling
233 * Special buffers are used for event queues and the TX and RX
236 *************************************************************************/
239 * Initialise a Falcon special buffer
241 * This will define a buffer (previously allocated via
242 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
243 * it to be used for event queues, descriptor rings etc.
246 falcon_init_special_buffer(struct efx_nic
*efx
,
247 struct efx_special_buffer
*buffer
)
249 efx_qword_t buf_desc
;
254 EFX_BUG_ON_PARANOID(!buffer
->addr
);
256 /* Write buffer descriptors to NIC */
257 for (i
= 0; i
< buffer
->entries
; i
++) {
258 index
= buffer
->index
+ i
;
259 dma_addr
= buffer
->dma_addr
+ (i
* 4096);
260 EFX_LOG(efx
, "mapping special buffer %d at %llx\n",
261 index
, (unsigned long long)dma_addr
);
262 EFX_POPULATE_QWORD_4(buf_desc
,
263 IP_DAT_BUF_SIZE
, IP_DAT_BUF_SIZE_4K
,
265 BUF_ADR_FBUF
, (dma_addr
>> 12),
266 BUF_OWNER_ID_FBUF
, 0);
267 falcon_write_sram(efx
, &buf_desc
, index
);
271 /* Unmaps a buffer from Falcon and clears the buffer table entries */
273 falcon_fini_special_buffer(struct efx_nic
*efx
,
274 struct efx_special_buffer
*buffer
)
276 efx_oword_t buf_tbl_upd
;
277 unsigned int start
= buffer
->index
;
278 unsigned int end
= (buffer
->index
+ buffer
->entries
- 1);
280 if (!buffer
->entries
)
283 EFX_LOG(efx
, "unmapping special buffers %d-%d\n",
284 buffer
->index
, buffer
->index
+ buffer
->entries
- 1);
286 EFX_POPULATE_OWORD_4(buf_tbl_upd
,
290 BUF_CLR_START_ID
, start
);
291 falcon_write(efx
, &buf_tbl_upd
, BUF_TBL_UPD_REG_KER
);
295 * Allocate a new Falcon special buffer
297 * This allocates memory for a new buffer, clears it and allocates a
298 * new buffer ID range. It does not write into Falcon's buffer table.
300 * This call will allocate 4KB buffers, since Falcon can't use 8KB
301 * buffers for event queues and descriptor rings.
303 static int falcon_alloc_special_buffer(struct efx_nic
*efx
,
304 struct efx_special_buffer
*buffer
,
307 struct falcon_nic_data
*nic_data
= efx
->nic_data
;
309 len
= ALIGN(len
, FALCON_BUF_SIZE
);
311 buffer
->addr
= pci_alloc_consistent(efx
->pci_dev
, len
,
316 buffer
->entries
= len
/ FALCON_BUF_SIZE
;
317 BUG_ON(buffer
->dma_addr
& (FALCON_BUF_SIZE
- 1));
319 /* All zeros is a potentially valid event so memset to 0xff */
320 memset(buffer
->addr
, 0xff, len
);
322 /* Select new buffer ID */
323 buffer
->index
= nic_data
->next_buffer_table
;
324 nic_data
->next_buffer_table
+= buffer
->entries
;
326 EFX_LOG(efx
, "allocating special buffers %d-%d at %llx+%x "
327 "(virt %p phys %lx)\n", buffer
->index
,
328 buffer
->index
+ buffer
->entries
- 1,
329 (unsigned long long)buffer
->dma_addr
, len
,
330 buffer
->addr
, virt_to_phys(buffer
->addr
));
335 static void falcon_free_special_buffer(struct efx_nic
*efx
,
336 struct efx_special_buffer
*buffer
)
341 EFX_LOG(efx
, "deallocating special buffers %d-%d at %llx+%x "
342 "(virt %p phys %lx)\n", buffer
->index
,
343 buffer
->index
+ buffer
->entries
- 1,
344 (unsigned long long)buffer
->dma_addr
, buffer
->len
,
345 buffer
->addr
, virt_to_phys(buffer
->addr
));
347 pci_free_consistent(efx
->pci_dev
, buffer
->len
, buffer
->addr
,
353 /**************************************************************************
355 * Falcon generic buffer handling
356 * These buffers are used for interrupt status and MAC stats
358 **************************************************************************/
360 static int falcon_alloc_buffer(struct efx_nic
*efx
,
361 struct efx_buffer
*buffer
, unsigned int len
)
363 buffer
->addr
= pci_alloc_consistent(efx
->pci_dev
, len
,
368 memset(buffer
->addr
, 0, len
);
372 static void falcon_free_buffer(struct efx_nic
*efx
, struct efx_buffer
*buffer
)
375 pci_free_consistent(efx
->pci_dev
, buffer
->len
,
376 buffer
->addr
, buffer
->dma_addr
);
381 /**************************************************************************
385 **************************************************************************/
387 /* Returns a pointer to the specified transmit descriptor in the TX
388 * descriptor queue belonging to the specified channel.
390 static inline efx_qword_t
*falcon_tx_desc(struct efx_tx_queue
*tx_queue
,
393 return (((efx_qword_t
*) (tx_queue
->txd
.addr
)) + index
);
396 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
397 static inline void falcon_notify_tx_desc(struct efx_tx_queue
*tx_queue
)
402 write_ptr
= tx_queue
->write_count
& FALCON_TXD_RING_MASK
;
403 EFX_POPULATE_DWORD_1(reg
, TX_DESC_WPTR_DWORD
, write_ptr
);
404 falcon_writel_page(tx_queue
->efx
, ®
,
405 TX_DESC_UPD_REG_KER_DWORD
, tx_queue
->queue
);
409 /* For each entry inserted into the software descriptor ring, create a
410 * descriptor in the hardware TX descriptor ring (in host memory), and
413 void falcon_push_buffers(struct efx_tx_queue
*tx_queue
)
416 struct efx_tx_buffer
*buffer
;
420 BUG_ON(tx_queue
->write_count
== tx_queue
->insert_count
);
423 write_ptr
= tx_queue
->write_count
& FALCON_TXD_RING_MASK
;
424 buffer
= &tx_queue
->buffer
[write_ptr
];
425 txd
= falcon_tx_desc(tx_queue
, write_ptr
);
426 ++tx_queue
->write_count
;
428 /* Create TX descriptor ring entry */
429 EFX_POPULATE_QWORD_5(*txd
,
431 TX_KER_CONT
, buffer
->continuation
,
432 TX_KER_BYTE_CNT
, buffer
->len
,
433 TX_KER_BUF_REGION
, 0,
434 TX_KER_BUF_ADR
, buffer
->dma_addr
);
435 } while (tx_queue
->write_count
!= tx_queue
->insert_count
);
437 wmb(); /* Ensure descriptors are written before they are fetched */
438 falcon_notify_tx_desc(tx_queue
);
441 /* Allocate hardware resources for a TX queue */
442 int falcon_probe_tx(struct efx_tx_queue
*tx_queue
)
444 struct efx_nic
*efx
= tx_queue
->efx
;
445 return falcon_alloc_special_buffer(efx
, &tx_queue
->txd
,
446 FALCON_TXD_RING_SIZE
*
447 sizeof(efx_qword_t
));
450 void falcon_init_tx(struct efx_tx_queue
*tx_queue
)
452 efx_oword_t tx_desc_ptr
;
453 struct efx_nic
*efx
= tx_queue
->efx
;
455 tx_queue
->flushed
= false;
457 /* Pin TX descriptor ring */
458 falcon_init_special_buffer(efx
, &tx_queue
->txd
);
460 /* Push TX descriptor ring to card */
461 EFX_POPULATE_OWORD_10(tx_desc_ptr
,
465 TX_DESCQ_BUF_BASE_ID
, tx_queue
->txd
.index
,
466 TX_DESCQ_EVQ_ID
, tx_queue
->channel
->channel
,
467 TX_DESCQ_OWNER_ID
, 0,
468 TX_DESCQ_LABEL
, tx_queue
->queue
,
469 TX_DESCQ_SIZE
, FALCON_TXD_RING_ORDER
,
471 TX_NON_IP_DROP_DIS_B0
, 1);
473 if (falcon_rev(efx
) >= FALCON_REV_B0
) {
474 int csum
= tx_queue
->queue
== EFX_TX_QUEUE_OFFLOAD_CSUM
;
475 EFX_SET_OWORD_FIELD(tx_desc_ptr
, TX_IP_CHKSM_DIS_B0
, !csum
);
476 EFX_SET_OWORD_FIELD(tx_desc_ptr
, TX_TCP_CHKSM_DIS_B0
, !csum
);
479 falcon_write_table(efx
, &tx_desc_ptr
, efx
->type
->txd_ptr_tbl_base
,
482 if (falcon_rev(efx
) < FALCON_REV_B0
) {
485 /* Only 128 bits in this register */
486 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT
>= 128);
488 falcon_read(efx
, ®
, TX_CHKSM_CFG_REG_KER_A1
);
489 if (tx_queue
->queue
== EFX_TX_QUEUE_OFFLOAD_CSUM
)
490 clear_bit_le(tx_queue
->queue
, (void *)®
);
492 set_bit_le(tx_queue
->queue
, (void *)®
);
493 falcon_write(efx
, ®
, TX_CHKSM_CFG_REG_KER_A1
);
497 static void falcon_flush_tx_queue(struct efx_tx_queue
*tx_queue
)
499 struct efx_nic
*efx
= tx_queue
->efx
;
500 efx_oword_t tx_flush_descq
;
502 /* Post a flush command */
503 EFX_POPULATE_OWORD_2(tx_flush_descq
,
504 TX_FLUSH_DESCQ_CMD
, 1,
505 TX_FLUSH_DESCQ
, tx_queue
->queue
);
506 falcon_write(efx
, &tx_flush_descq
, TX_FLUSH_DESCQ_REG_KER
);
509 void falcon_fini_tx(struct efx_tx_queue
*tx_queue
)
511 struct efx_nic
*efx
= tx_queue
->efx
;
512 efx_oword_t tx_desc_ptr
;
514 /* The queue should have been flushed */
515 WARN_ON(!tx_queue
->flushed
);
517 /* Remove TX descriptor ring from card */
518 EFX_ZERO_OWORD(tx_desc_ptr
);
519 falcon_write_table(efx
, &tx_desc_ptr
, efx
->type
->txd_ptr_tbl_base
,
522 /* Unpin TX descriptor ring */
523 falcon_fini_special_buffer(efx
, &tx_queue
->txd
);
526 /* Free buffers backing TX queue */
527 void falcon_remove_tx(struct efx_tx_queue
*tx_queue
)
529 falcon_free_special_buffer(tx_queue
->efx
, &tx_queue
->txd
);
532 /**************************************************************************
536 **************************************************************************/
538 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
539 static inline efx_qword_t
*falcon_rx_desc(struct efx_rx_queue
*rx_queue
,
542 return (((efx_qword_t
*) (rx_queue
->rxd
.addr
)) + index
);
545 /* This creates an entry in the RX descriptor queue */
546 static inline void falcon_build_rx_desc(struct efx_rx_queue
*rx_queue
,
549 struct efx_rx_buffer
*rx_buf
;
552 rxd
= falcon_rx_desc(rx_queue
, index
);
553 rx_buf
= efx_rx_buffer(rx_queue
, index
);
554 EFX_POPULATE_QWORD_3(*rxd
,
557 rx_queue
->efx
->type
->rx_buffer_padding
,
558 RX_KER_BUF_REGION
, 0,
559 RX_KER_BUF_ADR
, rx_buf
->dma_addr
);
562 /* This writes to the RX_DESC_WPTR register for the specified receive
565 void falcon_notify_rx_desc(struct efx_rx_queue
*rx_queue
)
570 while (rx_queue
->notified_count
!= rx_queue
->added_count
) {
571 falcon_build_rx_desc(rx_queue
,
572 rx_queue
->notified_count
&
573 FALCON_RXD_RING_MASK
);
574 ++rx_queue
->notified_count
;
578 write_ptr
= rx_queue
->added_count
& FALCON_RXD_RING_MASK
;
579 EFX_POPULATE_DWORD_1(reg
, RX_DESC_WPTR_DWORD
, write_ptr
);
580 falcon_writel_page(rx_queue
->efx
, ®
,
581 RX_DESC_UPD_REG_KER_DWORD
, rx_queue
->queue
);
584 int falcon_probe_rx(struct efx_rx_queue
*rx_queue
)
586 struct efx_nic
*efx
= rx_queue
->efx
;
587 return falcon_alloc_special_buffer(efx
, &rx_queue
->rxd
,
588 FALCON_RXD_RING_SIZE
*
589 sizeof(efx_qword_t
));
592 void falcon_init_rx(struct efx_rx_queue
*rx_queue
)
594 efx_oword_t rx_desc_ptr
;
595 struct efx_nic
*efx
= rx_queue
->efx
;
596 bool is_b0
= falcon_rev(efx
) >= FALCON_REV_B0
;
597 bool iscsi_digest_en
= is_b0
;
599 EFX_LOG(efx
, "RX queue %d ring in special buffers %d-%d\n",
600 rx_queue
->queue
, rx_queue
->rxd
.index
,
601 rx_queue
->rxd
.index
+ rx_queue
->rxd
.entries
- 1);
603 rx_queue
->flushed
= false;
605 /* Pin RX descriptor ring */
606 falcon_init_special_buffer(efx
, &rx_queue
->rxd
);
608 /* Push RX descriptor ring to card */
609 EFX_POPULATE_OWORD_10(rx_desc_ptr
,
610 RX_ISCSI_DDIG_EN
, iscsi_digest_en
,
611 RX_ISCSI_HDIG_EN
, iscsi_digest_en
,
612 RX_DESCQ_BUF_BASE_ID
, rx_queue
->rxd
.index
,
613 RX_DESCQ_EVQ_ID
, rx_queue
->channel
->channel
,
614 RX_DESCQ_OWNER_ID
, 0,
615 RX_DESCQ_LABEL
, rx_queue
->queue
,
616 RX_DESCQ_SIZE
, FALCON_RXD_RING_ORDER
,
617 RX_DESCQ_TYPE
, 0 /* kernel queue */ ,
618 /* For >=B0 this is scatter so disable */
619 RX_DESCQ_JUMBO
, !is_b0
,
621 falcon_write_table(efx
, &rx_desc_ptr
, efx
->type
->rxd_ptr_tbl_base
,
625 static void falcon_flush_rx_queue(struct efx_rx_queue
*rx_queue
)
627 struct efx_nic
*efx
= rx_queue
->efx
;
628 efx_oword_t rx_flush_descq
;
630 /* Post a flush command */
631 EFX_POPULATE_OWORD_2(rx_flush_descq
,
632 RX_FLUSH_DESCQ_CMD
, 1,
633 RX_FLUSH_DESCQ
, rx_queue
->queue
);
634 falcon_write(efx
, &rx_flush_descq
, RX_FLUSH_DESCQ_REG_KER
);
637 void falcon_fini_rx(struct efx_rx_queue
*rx_queue
)
639 efx_oword_t rx_desc_ptr
;
640 struct efx_nic
*efx
= rx_queue
->efx
;
642 /* The queue should already have been flushed */
643 WARN_ON(!rx_queue
->flushed
);
645 /* Remove RX descriptor ring from card */
646 EFX_ZERO_OWORD(rx_desc_ptr
);
647 falcon_write_table(efx
, &rx_desc_ptr
, efx
->type
->rxd_ptr_tbl_base
,
650 /* Unpin RX descriptor ring */
651 falcon_fini_special_buffer(efx
, &rx_queue
->rxd
);
654 /* Free buffers backing RX queue */
655 void falcon_remove_rx(struct efx_rx_queue
*rx_queue
)
657 falcon_free_special_buffer(rx_queue
->efx
, &rx_queue
->rxd
);
660 /**************************************************************************
662 * Falcon event queue processing
663 * Event queues are processed by per-channel tasklets.
665 **************************************************************************/
667 /* Update a channel's event queue's read pointer (RPTR) register
669 * This writes the EVQ_RPTR_REG register for the specified channel's
672 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
673 * whereas channel->eventq_read_ptr contains the index of the "next to
676 void falcon_eventq_read_ack(struct efx_channel
*channel
)
679 struct efx_nic
*efx
= channel
->efx
;
681 EFX_POPULATE_DWORD_1(reg
, EVQ_RPTR_DWORD
, channel
->eventq_read_ptr
);
682 falcon_writel_table(efx
, ®
, efx
->type
->evq_rptr_tbl_base
,
686 /* Use HW to insert a SW defined event */
687 void falcon_generate_event(struct efx_channel
*channel
, efx_qword_t
*event
)
689 efx_oword_t drv_ev_reg
;
691 EFX_POPULATE_OWORD_2(drv_ev_reg
,
692 DRV_EV_QID
, channel
->channel
,
694 EFX_QWORD_FIELD64(*event
, WHOLE_EVENT
));
695 falcon_write(channel
->efx
, &drv_ev_reg
, DRV_EV_REG_KER
);
698 /* Handle a transmit completion event
700 * Falcon batches TX completion events; the message we receive is of
701 * the form "complete all TX events up to this index".
703 static void falcon_handle_tx_event(struct efx_channel
*channel
,
706 unsigned int tx_ev_desc_ptr
;
707 unsigned int tx_ev_q_label
;
708 struct efx_tx_queue
*tx_queue
;
709 struct efx_nic
*efx
= channel
->efx
;
711 if (likely(EFX_QWORD_FIELD(*event
, TX_EV_COMP
))) {
712 /* Transmit completion */
713 tx_ev_desc_ptr
= EFX_QWORD_FIELD(*event
, TX_EV_DESC_PTR
);
714 tx_ev_q_label
= EFX_QWORD_FIELD(*event
, TX_EV_Q_LABEL
);
715 tx_queue
= &efx
->tx_queue
[tx_ev_q_label
];
716 efx_xmit_done(tx_queue
, tx_ev_desc_ptr
);
717 } else if (EFX_QWORD_FIELD(*event
, TX_EV_WQ_FF_FULL
)) {
718 /* Rewrite the FIFO write pointer */
719 tx_ev_q_label
= EFX_QWORD_FIELD(*event
, TX_EV_Q_LABEL
);
720 tx_queue
= &efx
->tx_queue
[tx_ev_q_label
];
722 if (efx_dev_registered(efx
))
723 netif_tx_lock(efx
->net_dev
);
724 falcon_notify_tx_desc(tx_queue
);
725 if (efx_dev_registered(efx
))
726 netif_tx_unlock(efx
->net_dev
);
727 } else if (EFX_QWORD_FIELD(*event
, TX_EV_PKT_ERR
) &&
728 EFX_WORKAROUND_10727(efx
)) {
729 efx_schedule_reset(efx
, RESET_TYPE_TX_DESC_FETCH
);
731 EFX_ERR(efx
, "channel %d unexpected TX event "
732 EFX_QWORD_FMT
"\n", channel
->channel
,
733 EFX_QWORD_VAL(*event
));
737 /* Detect errors included in the rx_evt_pkt_ok bit. */
738 static void falcon_handle_rx_not_ok(struct efx_rx_queue
*rx_queue
,
739 const efx_qword_t
*event
,
743 struct efx_nic
*efx
= rx_queue
->efx
;
744 bool rx_ev_buf_owner_id_err
, rx_ev_ip_hdr_chksum_err
;
745 bool rx_ev_tcp_udp_chksum_err
, rx_ev_eth_crc_err
;
746 bool rx_ev_frm_trunc
, rx_ev_drib_nib
, rx_ev_tobe_disc
;
747 bool rx_ev_other_err
, rx_ev_pause_frm
;
748 bool rx_ev_ip_frag_err
, rx_ev_hdr_type
, rx_ev_mcast_pkt
;
749 unsigned rx_ev_pkt_type
;
751 rx_ev_hdr_type
= EFX_QWORD_FIELD(*event
, RX_EV_HDR_TYPE
);
752 rx_ev_mcast_pkt
= EFX_QWORD_FIELD(*event
, RX_EV_MCAST_PKT
);
753 rx_ev_tobe_disc
= EFX_QWORD_FIELD(*event
, RX_EV_TOBE_DISC
);
754 rx_ev_pkt_type
= EFX_QWORD_FIELD(*event
, RX_EV_PKT_TYPE
);
755 rx_ev_buf_owner_id_err
= EFX_QWORD_FIELD(*event
,
756 RX_EV_BUF_OWNER_ID_ERR
);
757 rx_ev_ip_frag_err
= EFX_QWORD_FIELD(*event
, RX_EV_IF_FRAG_ERR
);
758 rx_ev_ip_hdr_chksum_err
= EFX_QWORD_FIELD(*event
,
759 RX_EV_IP_HDR_CHKSUM_ERR
);
760 rx_ev_tcp_udp_chksum_err
= EFX_QWORD_FIELD(*event
,
761 RX_EV_TCP_UDP_CHKSUM_ERR
);
762 rx_ev_eth_crc_err
= EFX_QWORD_FIELD(*event
, RX_EV_ETH_CRC_ERR
);
763 rx_ev_frm_trunc
= EFX_QWORD_FIELD(*event
, RX_EV_FRM_TRUNC
);
764 rx_ev_drib_nib
= ((falcon_rev(efx
) >= FALCON_REV_B0
) ?
765 0 : EFX_QWORD_FIELD(*event
, RX_EV_DRIB_NIB
));
766 rx_ev_pause_frm
= EFX_QWORD_FIELD(*event
, RX_EV_PAUSE_FRM_ERR
);
768 /* Every error apart from tobe_disc and pause_frm */
769 rx_ev_other_err
= (rx_ev_drib_nib
| rx_ev_tcp_udp_chksum_err
|
770 rx_ev_buf_owner_id_err
| rx_ev_eth_crc_err
|
771 rx_ev_frm_trunc
| rx_ev_ip_hdr_chksum_err
);
773 /* Count errors that are not in MAC stats. */
775 ++rx_queue
->channel
->n_rx_frm_trunc
;
776 else if (rx_ev_tobe_disc
)
777 ++rx_queue
->channel
->n_rx_tobe_disc
;
778 else if (rx_ev_ip_hdr_chksum_err
)
779 ++rx_queue
->channel
->n_rx_ip_hdr_chksum_err
;
780 else if (rx_ev_tcp_udp_chksum_err
)
781 ++rx_queue
->channel
->n_rx_tcp_udp_chksum_err
;
782 if (rx_ev_ip_frag_err
)
783 ++rx_queue
->channel
->n_rx_ip_frag_err
;
785 /* The frame must be discarded if any of these are true. */
786 *discard
= (rx_ev_eth_crc_err
| rx_ev_frm_trunc
| rx_ev_drib_nib
|
787 rx_ev_tobe_disc
| rx_ev_pause_frm
);
789 /* TOBE_DISC is expected on unicast mismatches; don't print out an
790 * error message. FRM_TRUNC indicates RXDP dropped the packet due
791 * to a FIFO overflow.
793 #ifdef EFX_ENABLE_DEBUG
794 if (rx_ev_other_err
) {
795 EFX_INFO_RL(efx
, " RX queue %d unexpected RX event "
796 EFX_QWORD_FMT
"%s%s%s%s%s%s%s%s\n",
797 rx_queue
->queue
, EFX_QWORD_VAL(*event
),
798 rx_ev_buf_owner_id_err
? " [OWNER_ID_ERR]" : "",
799 rx_ev_ip_hdr_chksum_err
?
800 " [IP_HDR_CHKSUM_ERR]" : "",
801 rx_ev_tcp_udp_chksum_err
?
802 " [TCP_UDP_CHKSUM_ERR]" : "",
803 rx_ev_eth_crc_err
? " [ETH_CRC_ERR]" : "",
804 rx_ev_frm_trunc
? " [FRM_TRUNC]" : "",
805 rx_ev_drib_nib
? " [DRIB_NIB]" : "",
806 rx_ev_tobe_disc
? " [TOBE_DISC]" : "",
807 rx_ev_pause_frm
? " [PAUSE]" : "");
811 if (unlikely(rx_ev_eth_crc_err
&& EFX_WORKAROUND_10750(efx
) &&
812 efx
->phy_type
== PHY_TYPE_10XPRESS
))
813 tenxpress_crc_err(efx
);
816 /* Handle receive events that are not in-order. */
817 static void falcon_handle_rx_bad_index(struct efx_rx_queue
*rx_queue
,
820 struct efx_nic
*efx
= rx_queue
->efx
;
821 unsigned expected
, dropped
;
823 expected
= rx_queue
->removed_count
& FALCON_RXD_RING_MASK
;
824 dropped
= ((index
+ FALCON_RXD_RING_SIZE
- expected
) &
825 FALCON_RXD_RING_MASK
);
826 EFX_INFO(efx
, "dropped %d events (index=%d expected=%d)\n",
827 dropped
, index
, expected
);
829 efx_schedule_reset(efx
, EFX_WORKAROUND_5676(efx
) ?
830 RESET_TYPE_RX_RECOVERY
: RESET_TYPE_DISABLE
);
833 /* Handle a packet received event
835 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
836 * wrong destination address
837 * Also "is multicast" and "matches multicast filter" flags can be used to
838 * discard non-matching multicast packets.
840 static void falcon_handle_rx_event(struct efx_channel
*channel
,
841 const efx_qword_t
*event
)
843 unsigned int rx_ev_desc_ptr
, rx_ev_byte_cnt
;
844 unsigned int rx_ev_hdr_type
, rx_ev_mcast_pkt
;
845 unsigned expected_ptr
;
846 bool rx_ev_pkt_ok
, discard
= false, checksummed
;
847 struct efx_rx_queue
*rx_queue
;
848 struct efx_nic
*efx
= channel
->efx
;
850 /* Basic packet information */
851 rx_ev_byte_cnt
= EFX_QWORD_FIELD(*event
, RX_EV_BYTE_CNT
);
852 rx_ev_pkt_ok
= EFX_QWORD_FIELD(*event
, RX_EV_PKT_OK
);
853 rx_ev_hdr_type
= EFX_QWORD_FIELD(*event
, RX_EV_HDR_TYPE
);
854 WARN_ON(EFX_QWORD_FIELD(*event
, RX_EV_JUMBO_CONT
));
855 WARN_ON(EFX_QWORD_FIELD(*event
, RX_EV_SOP
) != 1);
856 WARN_ON(EFX_QWORD_FIELD(*event
, RX_EV_Q_LABEL
) != channel
->channel
);
858 rx_queue
= &efx
->rx_queue
[channel
->channel
];
860 rx_ev_desc_ptr
= EFX_QWORD_FIELD(*event
, RX_EV_DESC_PTR
);
861 expected_ptr
= rx_queue
->removed_count
& FALCON_RXD_RING_MASK
;
862 if (unlikely(rx_ev_desc_ptr
!= expected_ptr
))
863 falcon_handle_rx_bad_index(rx_queue
, rx_ev_desc_ptr
);
865 if (likely(rx_ev_pkt_ok
)) {
866 /* If packet is marked as OK and packet type is TCP/IPv4 or
867 * UDP/IPv4, then we can rely on the hardware checksum.
869 checksummed
= RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type
);
871 falcon_handle_rx_not_ok(rx_queue
, event
, &rx_ev_pkt_ok
,
876 /* Detect multicast packets that didn't match the filter */
877 rx_ev_mcast_pkt
= EFX_QWORD_FIELD(*event
, RX_EV_MCAST_PKT
);
878 if (rx_ev_mcast_pkt
) {
879 unsigned int rx_ev_mcast_hash_match
=
880 EFX_QWORD_FIELD(*event
, RX_EV_MCAST_HASH_MATCH
);
882 if (unlikely(!rx_ev_mcast_hash_match
))
886 /* Handle received packet */
887 efx_rx_packet(rx_queue
, rx_ev_desc_ptr
, rx_ev_byte_cnt
,
888 checksummed
, discard
);
891 /* Global events are basically PHY events */
892 static void falcon_handle_global_event(struct efx_channel
*channel
,
895 struct efx_nic
*efx
= channel
->efx
;
896 bool is_phy_event
= false, handled
= false;
898 /* Check for interrupt on either port. Some boards have a
899 * single PHY wired to the interrupt line for port 1. */
900 if (EFX_QWORD_FIELD(*event
, G_PHY0_INTR
) ||
901 EFX_QWORD_FIELD(*event
, G_PHY1_INTR
) ||
902 EFX_QWORD_FIELD(*event
, XG_PHY_INTR
))
905 if ((falcon_rev(efx
) >= FALCON_REV_B0
) &&
906 EFX_QWORD_FIELD(*event
, XG_MNT_INTR_B0
))
910 efx
->phy_op
->clear_interrupt(efx
);
911 queue_work(efx
->workqueue
, &efx
->reconfigure_work
);
915 if (EFX_QWORD_FIELD_VER(efx
, *event
, RX_RECOVERY
)) {
916 EFX_ERR(efx
, "channel %d seen global RX_RESET "
917 "event. Resetting.\n", channel
->channel
);
919 atomic_inc(&efx
->rx_reset
);
920 efx_schedule_reset(efx
, EFX_WORKAROUND_6555(efx
) ?
921 RESET_TYPE_RX_RECOVERY
: RESET_TYPE_DISABLE
);
926 EFX_ERR(efx
, "channel %d unknown global event "
927 EFX_QWORD_FMT
"\n", channel
->channel
,
928 EFX_QWORD_VAL(*event
));
931 static void falcon_handle_driver_event(struct efx_channel
*channel
,
934 struct efx_nic
*efx
= channel
->efx
;
935 unsigned int ev_sub_code
;
936 unsigned int ev_sub_data
;
938 ev_sub_code
= EFX_QWORD_FIELD(*event
, DRIVER_EV_SUB_CODE
);
939 ev_sub_data
= EFX_QWORD_FIELD(*event
, DRIVER_EV_SUB_DATA
);
941 switch (ev_sub_code
) {
942 case TX_DESCQ_FLS_DONE_EV_DECODE
:
943 EFX_TRACE(efx
, "channel %d TXQ %d flushed\n",
944 channel
->channel
, ev_sub_data
);
946 case RX_DESCQ_FLS_DONE_EV_DECODE
:
947 EFX_TRACE(efx
, "channel %d RXQ %d flushed\n",
948 channel
->channel
, ev_sub_data
);
950 case EVQ_INIT_DONE_EV_DECODE
:
951 EFX_LOG(efx
, "channel %d EVQ %d initialised\n",
952 channel
->channel
, ev_sub_data
);
954 case SRM_UPD_DONE_EV_DECODE
:
955 EFX_TRACE(efx
, "channel %d SRAM update done\n",
958 case WAKE_UP_EV_DECODE
:
959 EFX_TRACE(efx
, "channel %d RXQ %d wakeup event\n",
960 channel
->channel
, ev_sub_data
);
962 case TIMER_EV_DECODE
:
963 EFX_TRACE(efx
, "channel %d RX queue %d timer expired\n",
964 channel
->channel
, ev_sub_data
);
966 case RX_RECOVERY_EV_DECODE
:
967 EFX_ERR(efx
, "channel %d seen DRIVER RX_RESET event. "
968 "Resetting.\n", channel
->channel
);
969 atomic_inc(&efx
->rx_reset
);
970 efx_schedule_reset(efx
,
971 EFX_WORKAROUND_6555(efx
) ?
972 RESET_TYPE_RX_RECOVERY
:
975 case RX_DSC_ERROR_EV_DECODE
:
976 EFX_ERR(efx
, "RX DMA Q %d reports descriptor fetch error."
977 " RX Q %d is disabled.\n", ev_sub_data
, ev_sub_data
);
978 efx_schedule_reset(efx
, RESET_TYPE_RX_DESC_FETCH
);
980 case TX_DSC_ERROR_EV_DECODE
:
981 EFX_ERR(efx
, "TX DMA Q %d reports descriptor fetch error."
982 " TX Q %d is disabled.\n", ev_sub_data
, ev_sub_data
);
983 efx_schedule_reset(efx
, RESET_TYPE_TX_DESC_FETCH
);
986 EFX_TRACE(efx
, "channel %d unknown driver event code %d "
987 "data %04x\n", channel
->channel
, ev_sub_code
,
993 int falcon_process_eventq(struct efx_channel
*channel
, int rx_quota
)
995 unsigned int read_ptr
;
996 efx_qword_t event
, *p_event
;
1000 read_ptr
= channel
->eventq_read_ptr
;
1003 p_event
= falcon_event(channel
, read_ptr
);
1006 if (!falcon_event_present(&event
))
1010 EFX_TRACE(channel
->efx
, "channel %d event is "EFX_QWORD_FMT
"\n",
1011 channel
->channel
, EFX_QWORD_VAL(event
));
1013 /* Clear this event by marking it all ones */
1014 EFX_SET_QWORD(*p_event
);
1016 ev_code
= EFX_QWORD_FIELD(event
, EV_CODE
);
1019 case RX_IP_EV_DECODE
:
1020 falcon_handle_rx_event(channel
, &event
);
1023 case TX_IP_EV_DECODE
:
1024 falcon_handle_tx_event(channel
, &event
);
1026 case DRV_GEN_EV_DECODE
:
1027 channel
->eventq_magic
1028 = EFX_QWORD_FIELD(event
, EVQ_MAGIC
);
1029 EFX_LOG(channel
->efx
, "channel %d received generated "
1030 "event "EFX_QWORD_FMT
"\n", channel
->channel
,
1031 EFX_QWORD_VAL(event
));
1033 case GLOBAL_EV_DECODE
:
1034 falcon_handle_global_event(channel
, &event
);
1036 case DRIVER_EV_DECODE
:
1037 falcon_handle_driver_event(channel
, &event
);
1040 EFX_ERR(channel
->efx
, "channel %d unknown event type %d"
1041 " (data " EFX_QWORD_FMT
")\n", channel
->channel
,
1042 ev_code
, EFX_QWORD_VAL(event
));
1045 /* Increment read pointer */
1046 read_ptr
= (read_ptr
+ 1) & FALCON_EVQ_MASK
;
1048 } while (rx_packets
< rx_quota
);
1050 channel
->eventq_read_ptr
= read_ptr
;
1054 void falcon_set_int_moderation(struct efx_channel
*channel
)
1056 efx_dword_t timer_cmd
;
1057 struct efx_nic
*efx
= channel
->efx
;
1059 /* Set timer register */
1060 if (channel
->irq_moderation
) {
1061 /* Round to resolution supported by hardware. The value we
1062 * program is based at 0. So actual interrupt moderation
1063 * achieved is ((x + 1) * res).
1065 unsigned int res
= 5;
1066 channel
->irq_moderation
-= (channel
->irq_moderation
% res
);
1067 if (channel
->irq_moderation
< res
)
1068 channel
->irq_moderation
= res
;
1069 EFX_POPULATE_DWORD_2(timer_cmd
,
1070 TIMER_MODE
, TIMER_MODE_INT_HLDOFF
,
1072 (channel
->irq_moderation
/ res
) - 1);
1074 EFX_POPULATE_DWORD_2(timer_cmd
,
1075 TIMER_MODE
, TIMER_MODE_DIS
,
1078 falcon_writel_page_locked(efx
, &timer_cmd
, TIMER_CMD_REG_KER
,
1083 /* Allocate buffer table entries for event queue */
1084 int falcon_probe_eventq(struct efx_channel
*channel
)
1086 struct efx_nic
*efx
= channel
->efx
;
1087 unsigned int evq_size
;
1089 evq_size
= FALCON_EVQ_SIZE
* sizeof(efx_qword_t
);
1090 return falcon_alloc_special_buffer(efx
, &channel
->eventq
, evq_size
);
1093 void falcon_init_eventq(struct efx_channel
*channel
)
1095 efx_oword_t evq_ptr
;
1096 struct efx_nic
*efx
= channel
->efx
;
1098 EFX_LOG(efx
, "channel %d event queue in special buffers %d-%d\n",
1099 channel
->channel
, channel
->eventq
.index
,
1100 channel
->eventq
.index
+ channel
->eventq
.entries
- 1);
1102 /* Pin event queue buffer */
1103 falcon_init_special_buffer(efx
, &channel
->eventq
);
1105 /* Fill event queue with all ones (i.e. empty events) */
1106 memset(channel
->eventq
.addr
, 0xff, channel
->eventq
.len
);
1108 /* Push event queue to card */
1109 EFX_POPULATE_OWORD_3(evq_ptr
,
1111 EVQ_SIZE
, FALCON_EVQ_ORDER
,
1112 EVQ_BUF_BASE_ID
, channel
->eventq
.index
);
1113 falcon_write_table(efx
, &evq_ptr
, efx
->type
->evq_ptr_tbl_base
,
1116 falcon_set_int_moderation(channel
);
1119 void falcon_fini_eventq(struct efx_channel
*channel
)
1121 efx_oword_t eventq_ptr
;
1122 struct efx_nic
*efx
= channel
->efx
;
1124 /* Remove event queue from card */
1125 EFX_ZERO_OWORD(eventq_ptr
);
1126 falcon_write_table(efx
, &eventq_ptr
, efx
->type
->evq_ptr_tbl_base
,
1129 /* Unpin event queue */
1130 falcon_fini_special_buffer(efx
, &channel
->eventq
);
1133 /* Free buffers backing event queue */
1134 void falcon_remove_eventq(struct efx_channel
*channel
)
1136 falcon_free_special_buffer(channel
->efx
, &channel
->eventq
);
1140 /* Generates a test event on the event queue. A subsequent call to
1141 * process_eventq() should pick up the event and place the value of
1142 * "magic" into channel->eventq_magic;
1144 void falcon_generate_test_event(struct efx_channel
*channel
, unsigned int magic
)
1146 efx_qword_t test_event
;
1148 EFX_POPULATE_QWORD_2(test_event
,
1149 EV_CODE
, DRV_GEN_EV_DECODE
,
1151 falcon_generate_event(channel
, &test_event
);
1154 /**************************************************************************
1158 **************************************************************************/
1161 static void falcon_poll_flush_events(struct efx_nic
*efx
)
1163 struct efx_channel
*channel
= &efx
->channel
[0];
1164 struct efx_tx_queue
*tx_queue
;
1165 struct efx_rx_queue
*rx_queue
;
1166 unsigned int read_ptr
, i
;
1168 read_ptr
= channel
->eventq_read_ptr
;
1169 for (i
= 0; i
< FALCON_EVQ_SIZE
; ++i
) {
1170 efx_qword_t
*event
= falcon_event(channel
, read_ptr
);
1171 int ev_code
, ev_sub_code
, ev_queue
;
1173 if (!falcon_event_present(event
))
1176 ev_code
= EFX_QWORD_FIELD(*event
, EV_CODE
);
1177 if (ev_code
!= DRIVER_EV_DECODE
)
1180 ev_sub_code
= EFX_QWORD_FIELD(*event
, DRIVER_EV_SUB_CODE
);
1181 switch (ev_sub_code
) {
1182 case TX_DESCQ_FLS_DONE_EV_DECODE
:
1183 ev_queue
= EFX_QWORD_FIELD(*event
,
1184 DRIVER_EV_TX_DESCQ_ID
);
1185 if (ev_queue
< EFX_TX_QUEUE_COUNT
) {
1186 tx_queue
= efx
->tx_queue
+ ev_queue
;
1187 tx_queue
->flushed
= true;
1190 case RX_DESCQ_FLS_DONE_EV_DECODE
:
1191 ev_queue
= EFX_QWORD_FIELD(*event
,
1192 DRIVER_EV_RX_DESCQ_ID
);
1193 ev_failed
= EFX_QWORD_FIELD(*event
,
1194 DRIVER_EV_RX_FLUSH_FAIL
);
1195 if (ev_queue
< efx
->n_rx_queues
) {
1196 rx_queue
= efx
->rx_queue
+ ev_queue
;
1198 /* retry the rx flush */
1200 falcon_flush_rx_queue(rx_queue
);
1202 rx_queue
->flushed
= true;
1207 read_ptr
= (read_ptr
+ 1) & FALCON_EVQ_MASK
;
1211 /* Handle tx and rx flushes at the same time, since they run in
1212 * parallel in the hardware and there's no reason for us to
1214 int falcon_flush_queues(struct efx_nic
*efx
)
1216 struct efx_rx_queue
*rx_queue
;
1217 struct efx_tx_queue
*tx_queue
;
1221 /* Issue flush requests */
1222 efx_for_each_tx_queue(tx_queue
, efx
) {
1223 tx_queue
->flushed
= false;
1224 falcon_flush_tx_queue(tx_queue
);
1226 efx_for_each_rx_queue(rx_queue
, efx
) {
1227 rx_queue
->flushed
= false;
1228 falcon_flush_rx_queue(rx_queue
);
1231 /* Poll the evq looking for flush completions. Since we're not pushing
1232 * any more rx or tx descriptors at this point, we're in no danger of
1233 * overflowing the evq whilst we wait */
1234 for (i
= 0; i
< FALCON_FLUSH_POLL_COUNT
; ++i
) {
1235 msleep(FALCON_FLUSH_INTERVAL
);
1236 falcon_poll_flush_events(efx
);
1238 /* Check if every queue has been succesfully flushed */
1239 outstanding
= false;
1240 efx_for_each_tx_queue(tx_queue
, efx
)
1241 outstanding
|= !tx_queue
->flushed
;
1242 efx_for_each_rx_queue(rx_queue
, efx
)
1243 outstanding
|= !rx_queue
->flushed
;
1248 /* Mark the queues as all flushed. We're going to return failure
1249 * leading to a reset, or fake up success anyway. "flushed" now
1250 * indicates that we tried to flush. */
1251 efx_for_each_tx_queue(tx_queue
, efx
) {
1252 if (!tx_queue
->flushed
)
1253 EFX_ERR(efx
, "tx queue %d flush command timed out\n",
1255 tx_queue
->flushed
= true;
1257 efx_for_each_rx_queue(rx_queue
, efx
) {
1258 if (!rx_queue
->flushed
)
1259 EFX_ERR(efx
, "rx queue %d flush command timed out\n",
1261 rx_queue
->flushed
= true;
1264 if (EFX_WORKAROUND_7803(efx
))
1270 /**************************************************************************
1272 * Falcon hardware interrupts
1273 * The hardware interrupt handler does very little work; all the event
1274 * queue processing is carried out by per-channel tasklets.
1276 **************************************************************************/
1278 /* Enable/disable/generate Falcon interrupts */
1279 static inline void falcon_interrupts(struct efx_nic
*efx
, int enabled
,
1282 efx_oword_t int_en_reg_ker
;
1284 EFX_POPULATE_OWORD_2(int_en_reg_ker
,
1286 DRV_INT_EN_KER
, enabled
);
1287 falcon_write(efx
, &int_en_reg_ker
, INT_EN_REG_KER
);
1290 void falcon_enable_interrupts(struct efx_nic
*efx
)
1292 efx_oword_t int_adr_reg_ker
;
1293 struct efx_channel
*channel
;
1295 EFX_ZERO_OWORD(*((efx_oword_t
*) efx
->irq_status
.addr
));
1296 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1298 /* Program address */
1299 EFX_POPULATE_OWORD_2(int_adr_reg_ker
,
1300 NORM_INT_VEC_DIS_KER
, EFX_INT_MODE_USE_MSI(efx
),
1301 INT_ADR_KER
, efx
->irq_status
.dma_addr
);
1302 falcon_write(efx
, &int_adr_reg_ker
, INT_ADR_REG_KER
);
1304 /* Enable interrupts */
1305 falcon_interrupts(efx
, 1, 0);
1307 /* Force processing of all the channels to get the EVQ RPTRs up to
1309 efx_for_each_channel(channel
, efx
)
1310 efx_schedule_channel(channel
);
1313 void falcon_disable_interrupts(struct efx_nic
*efx
)
1315 /* Disable interrupts */
1316 falcon_interrupts(efx
, 0, 0);
1319 /* Generate a Falcon test interrupt
1320 * Interrupt must already have been enabled, otherwise nasty things
1323 void falcon_generate_interrupt(struct efx_nic
*efx
)
1325 falcon_interrupts(efx
, 1, 1);
1328 /* Acknowledge a legacy interrupt from Falcon
1330 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
1332 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
1333 * BIU. Interrupt acknowledge is read sensitive so must write instead
1334 * (then read to ensure the BIU collector is flushed)
1336 * NB most hardware supports MSI interrupts
1338 static inline void falcon_irq_ack_a1(struct efx_nic
*efx
)
1342 EFX_POPULATE_DWORD_1(reg
, INT_ACK_DUMMY_DATA
, 0xb7eb7e);
1343 falcon_writel(efx
, ®
, INT_ACK_REG_KER_A1
);
1344 falcon_readl(efx
, ®
, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1
);
1347 /* Process a fatal interrupt
1348 * Disable bus mastering ASAP and schedule a reset
1350 static irqreturn_t
falcon_fatal_interrupt(struct efx_nic
*efx
)
1352 struct falcon_nic_data
*nic_data
= efx
->nic_data
;
1353 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1354 efx_oword_t fatal_intr
;
1355 int error
, mem_perr
;
1356 static int n_int_errors
;
1358 falcon_read(efx
, &fatal_intr
, FATAL_INTR_REG_KER
);
1359 error
= EFX_OWORD_FIELD(fatal_intr
, INT_KER_ERROR
);
1361 EFX_ERR(efx
, "SYSTEM ERROR " EFX_OWORD_FMT
" status "
1362 EFX_OWORD_FMT
": %s\n", EFX_OWORD_VAL(*int_ker
),
1363 EFX_OWORD_VAL(fatal_intr
),
1364 error
? "disabling bus mastering" : "no recognised error");
1368 /* If this is a memory parity error dump which blocks are offending */
1369 mem_perr
= EFX_OWORD_FIELD(fatal_intr
, MEM_PERR_INT_KER
);
1372 falcon_read(efx
, ®
, MEM_STAT_REG_KER
);
1373 EFX_ERR(efx
, "SYSTEM ERROR: memory parity error "
1374 EFX_OWORD_FMT
"\n", EFX_OWORD_VAL(reg
));
1377 /* Disable both devices */
1378 pci_disable_device(efx
->pci_dev
);
1379 if (FALCON_IS_DUAL_FUNC(efx
))
1380 pci_disable_device(nic_data
->pci_dev2
);
1381 falcon_disable_interrupts(efx
);
1383 if (++n_int_errors
< FALCON_MAX_INT_ERRORS
) {
1384 EFX_ERR(efx
, "SYSTEM ERROR - reset scheduled\n");
1385 efx_schedule_reset(efx
, RESET_TYPE_INT_ERROR
);
1387 EFX_ERR(efx
, "SYSTEM ERROR - max number of errors seen."
1388 "NIC will be disabled\n");
1389 efx_schedule_reset(efx
, RESET_TYPE_DISABLE
);
1395 /* Handle a legacy interrupt from Falcon
1396 * Acknowledges the interrupt and schedule event queue processing.
1398 static irqreturn_t
falcon_legacy_interrupt_b0(int irq
, void *dev_id
)
1400 struct efx_nic
*efx
= dev_id
;
1401 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1402 struct efx_channel
*channel
;
1407 /* Read the ISR which also ACKs the interrupts */
1408 falcon_readl(efx
, ®
, INT_ISR0_B0
);
1409 queues
= EFX_EXTRACT_DWORD(reg
, 0, 31);
1411 /* Check to see if we have a serious error condition */
1412 syserr
= EFX_OWORD_FIELD(*int_ker
, FATAL_INT
);
1413 if (unlikely(syserr
))
1414 return falcon_fatal_interrupt(efx
);
1419 efx
->last_irq_cpu
= raw_smp_processor_id();
1420 EFX_TRACE(efx
, "IRQ %d on CPU %d status " EFX_DWORD_FMT
"\n",
1421 irq
, raw_smp_processor_id(), EFX_DWORD_VAL(reg
));
1423 /* Schedule processing of any interrupting queues */
1424 channel
= &efx
->channel
[0];
1427 efx_schedule_channel(channel
);
1436 static irqreturn_t
falcon_legacy_interrupt_a1(int irq
, void *dev_id
)
1438 struct efx_nic
*efx
= dev_id
;
1439 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1440 struct efx_channel
*channel
;
1444 /* Check to see if this is our interrupt. If it isn't, we
1445 * exit without having touched the hardware.
1447 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker
))) {
1448 EFX_TRACE(efx
, "IRQ %d on CPU %d not for me\n", irq
,
1449 raw_smp_processor_id());
1452 efx
->last_irq_cpu
= raw_smp_processor_id();
1453 EFX_TRACE(efx
, "IRQ %d on CPU %d status " EFX_OWORD_FMT
"\n",
1454 irq
, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker
));
1456 /* Check to see if we have a serious error condition */
1457 syserr
= EFX_OWORD_FIELD(*int_ker
, FATAL_INT
);
1458 if (unlikely(syserr
))
1459 return falcon_fatal_interrupt(efx
);
1461 /* Determine interrupting queues, clear interrupt status
1462 * register and acknowledge the device interrupt.
1464 BUILD_BUG_ON(INT_EVQS_WIDTH
> EFX_MAX_CHANNELS
);
1465 queues
= EFX_OWORD_FIELD(*int_ker
, INT_EVQS
);
1466 EFX_ZERO_OWORD(*int_ker
);
1467 wmb(); /* Ensure the vector is cleared before interrupt ack */
1468 falcon_irq_ack_a1(efx
);
1470 /* Schedule processing of any interrupting queues */
1471 channel
= &efx
->channel
[0];
1474 efx_schedule_channel(channel
);
1482 /* Handle an MSI interrupt from Falcon
1484 * Handle an MSI hardware interrupt. This routine schedules event
1485 * queue processing. No interrupt acknowledgement cycle is necessary.
1486 * Also, we never need to check that the interrupt is for us, since
1487 * MSI interrupts cannot be shared.
1489 static irqreturn_t
falcon_msi_interrupt(int irq
, void *dev_id
)
1491 struct efx_channel
*channel
= dev_id
;
1492 struct efx_nic
*efx
= channel
->efx
;
1493 efx_oword_t
*int_ker
= efx
->irq_status
.addr
;
1496 efx
->last_irq_cpu
= raw_smp_processor_id();
1497 EFX_TRACE(efx
, "IRQ %d on CPU %d status " EFX_OWORD_FMT
"\n",
1498 irq
, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker
));
1500 /* Check to see if we have a serious error condition */
1501 syserr
= EFX_OWORD_FIELD(*int_ker
, FATAL_INT
);
1502 if (unlikely(syserr
))
1503 return falcon_fatal_interrupt(efx
);
1505 /* Schedule processing of the channel */
1506 efx_schedule_channel(channel
);
1512 /* Setup RSS indirection table.
1513 * This maps from the hash value of the packet to RXQ
1515 static void falcon_setup_rss_indir_table(struct efx_nic
*efx
)
1518 unsigned long offset
;
1521 if (falcon_rev(efx
) < FALCON_REV_B0
)
1524 for (offset
= RX_RSS_INDIR_TBL_B0
;
1525 offset
< RX_RSS_INDIR_TBL_B0
+ 0x800;
1527 EFX_POPULATE_DWORD_1(dword
, RX_RSS_INDIR_ENT_B0
,
1528 i
% efx
->n_rx_queues
);
1529 falcon_writel(efx
, &dword
, offset
);
1534 /* Hook interrupt handler(s)
1535 * Try MSI and then legacy interrupts.
1537 int falcon_init_interrupt(struct efx_nic
*efx
)
1539 struct efx_channel
*channel
;
1542 if (!EFX_INT_MODE_USE_MSI(efx
)) {
1543 irq_handler_t handler
;
1544 if (falcon_rev(efx
) >= FALCON_REV_B0
)
1545 handler
= falcon_legacy_interrupt_b0
;
1547 handler
= falcon_legacy_interrupt_a1
;
1549 rc
= request_irq(efx
->legacy_irq
, handler
, IRQF_SHARED
,
1552 EFX_ERR(efx
, "failed to hook legacy IRQ %d\n",
1559 /* Hook MSI or MSI-X interrupt */
1560 efx_for_each_channel(channel
, efx
) {
1561 rc
= request_irq(channel
->irq
, falcon_msi_interrupt
,
1562 IRQF_PROBE_SHARED
, /* Not shared */
1563 efx
->name
, channel
);
1565 EFX_ERR(efx
, "failed to hook IRQ %d\n", channel
->irq
);
1573 efx_for_each_channel(channel
, efx
)
1574 free_irq(channel
->irq
, channel
);
1579 void falcon_fini_interrupt(struct efx_nic
*efx
)
1581 struct efx_channel
*channel
;
1584 /* Disable MSI/MSI-X interrupts */
1585 efx_for_each_channel(channel
, efx
) {
1587 free_irq(channel
->irq
, channel
);
1590 /* ACK legacy interrupt */
1591 if (falcon_rev(efx
) >= FALCON_REV_B0
)
1592 falcon_read(efx
, ®
, INT_ISR0_B0
);
1594 falcon_irq_ack_a1(efx
);
1596 /* Disable legacy interrupt */
1597 if (efx
->legacy_irq
)
1598 free_irq(efx
->legacy_irq
, efx
);
1601 /**************************************************************************
1605 **************************************************************************
1608 #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
1610 static int falcon_spi_poll(struct efx_nic
*efx
)
1613 falcon_read(efx
, ®
, EE_SPI_HCMD_REG_KER
);
1614 return EFX_OWORD_FIELD(reg
, EE_SPI_HCMD_CMD_EN
) ? -EBUSY
: 0;
1617 /* Wait for SPI command completion */
1618 static int falcon_spi_wait(struct efx_nic
*efx
)
1620 /* Most commands will finish quickly, so we start polling at
1621 * very short intervals. Sometimes the command may have to
1622 * wait for VPD or expansion ROM access outside of our
1623 * control, so we allow up to 100 ms. */
1624 unsigned long timeout
= jiffies
+ 1 + DIV_ROUND_UP(HZ
, 10);
1627 for (i
= 0; i
< 10; i
++) {
1628 if (!falcon_spi_poll(efx
))
1634 if (!falcon_spi_poll(efx
))
1636 if (time_after_eq(jiffies
, timeout
)) {
1637 EFX_ERR(efx
, "timed out waiting for SPI\n");
1640 schedule_timeout_uninterruptible(1);
1644 int falcon_spi_cmd(const struct efx_spi_device
*spi
,
1645 unsigned int command
, int address
,
1646 const void *in
, void *out
, size_t len
)
1648 struct efx_nic
*efx
= spi
->efx
;
1649 bool addressed
= (address
>= 0);
1650 bool reading
= (out
!= NULL
);
1654 /* Input validation */
1655 if (len
> FALCON_SPI_MAX_LEN
)
1657 BUG_ON(!mutex_is_locked(&efx
->spi_lock
));
1659 /* Check that previous command is not still running */
1660 rc
= falcon_spi_poll(efx
);
1664 /* Program address register, if we have an address */
1666 EFX_POPULATE_OWORD_1(reg
, EE_SPI_HADR_ADR
, address
);
1667 falcon_write(efx
, ®
, EE_SPI_HADR_REG_KER
);
1670 /* Program data register, if we have data */
1672 memcpy(®
, in
, len
);
1673 falcon_write(efx
, ®
, EE_SPI_HDATA_REG_KER
);
1676 /* Issue read/write command */
1677 EFX_POPULATE_OWORD_7(reg
,
1678 EE_SPI_HCMD_CMD_EN
, 1,
1679 EE_SPI_HCMD_SF_SEL
, spi
->device_id
,
1680 EE_SPI_HCMD_DABCNT
, len
,
1681 EE_SPI_HCMD_READ
, reading
,
1682 EE_SPI_HCMD_DUBCNT
, 0,
1684 (addressed
? spi
->addr_len
: 0),
1685 EE_SPI_HCMD_ENC
, command
);
1686 falcon_write(efx
, ®
, EE_SPI_HCMD_REG_KER
);
1688 /* Wait for read/write to complete */
1689 rc
= falcon_spi_wait(efx
);
1695 falcon_read(efx
, ®
, EE_SPI_HDATA_REG_KER
);
1696 memcpy(out
, ®
, len
);
1703 falcon_spi_write_limit(const struct efx_spi_device
*spi
, size_t start
)
1705 return min(FALCON_SPI_MAX_LEN
,
1706 (spi
->block_size
- (start
& (spi
->block_size
- 1))));
1710 efx_spi_munge_command(const struct efx_spi_device
*spi
,
1711 const u8 command
, const unsigned int address
)
1713 return command
| (((address
>> 8) & spi
->munge_address
) << 3);
1716 /* Wait up to 10 ms for buffered write completion */
1717 int falcon_spi_wait_write(const struct efx_spi_device
*spi
)
1719 struct efx_nic
*efx
= spi
->efx
;
1720 unsigned long timeout
= jiffies
+ 1 + DIV_ROUND_UP(HZ
, 100);
1725 rc
= falcon_spi_cmd(spi
, SPI_RDSR
, -1, NULL
,
1726 &status
, sizeof(status
));
1729 if (!(status
& SPI_STATUS_NRDY
))
1731 if (time_after_eq(jiffies
, timeout
)) {
1732 EFX_ERR(efx
, "SPI write timeout on device %d"
1733 " last status=0x%02x\n",
1734 spi
->device_id
, status
);
1737 schedule_timeout_uninterruptible(1);
1741 int falcon_spi_read(const struct efx_spi_device
*spi
, loff_t start
,
1742 size_t len
, size_t *retlen
, u8
*buffer
)
1744 size_t block_len
, pos
= 0;
1745 unsigned int command
;
1749 block_len
= min(len
- pos
, FALCON_SPI_MAX_LEN
);
1751 command
= efx_spi_munge_command(spi
, SPI_READ
, start
+ pos
);
1752 rc
= falcon_spi_cmd(spi
, command
, start
+ pos
, NULL
,
1753 buffer
+ pos
, block_len
);
1758 /* Avoid locking up the system */
1760 if (signal_pending(current
)) {
1771 int falcon_spi_write(const struct efx_spi_device
*spi
, loff_t start
,
1772 size_t len
, size_t *retlen
, const u8
*buffer
)
1774 u8 verify_buffer
[FALCON_SPI_MAX_LEN
];
1775 size_t block_len
, pos
= 0;
1776 unsigned int command
;
1780 rc
= falcon_spi_cmd(spi
, SPI_WREN
, -1, NULL
, NULL
, 0);
1784 block_len
= min(len
- pos
,
1785 falcon_spi_write_limit(spi
, start
+ pos
));
1786 command
= efx_spi_munge_command(spi
, SPI_WRITE
, start
+ pos
);
1787 rc
= falcon_spi_cmd(spi
, command
, start
+ pos
,
1788 buffer
+ pos
, NULL
, block_len
);
1792 rc
= falcon_spi_wait_write(spi
);
1796 command
= efx_spi_munge_command(spi
, SPI_READ
, start
+ pos
);
1797 rc
= falcon_spi_cmd(spi
, command
, start
+ pos
,
1798 NULL
, verify_buffer
, block_len
);
1799 if (memcmp(verify_buffer
, buffer
+ pos
, block_len
)) {
1806 /* Avoid locking up the system */
1808 if (signal_pending(current
)) {
1819 /**************************************************************************
1823 **************************************************************************
1825 void falcon_drain_tx_fifo(struct efx_nic
*efx
)
1830 if ((falcon_rev(efx
) < FALCON_REV_B0
) ||
1831 (efx
->loopback_mode
!= LOOPBACK_NONE
))
1834 falcon_read(efx
, &temp
, MAC0_CTRL_REG_KER
);
1835 /* There is no point in draining more than once */
1836 if (EFX_OWORD_FIELD(temp
, TXFIFO_DRAIN_EN_B0
))
1839 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1840 * the drain sequence with the statistics fetch */
1841 spin_lock(&efx
->stats_lock
);
1843 EFX_SET_OWORD_FIELD(temp
, TXFIFO_DRAIN_EN_B0
, 1);
1844 falcon_write(efx
, &temp
, MAC0_CTRL_REG_KER
);
1846 /* Reset the MAC and EM block. */
1847 falcon_read(efx
, &temp
, GLB_CTL_REG_KER
);
1848 EFX_SET_OWORD_FIELD(temp
, RST_XGTX
, 1);
1849 EFX_SET_OWORD_FIELD(temp
, RST_XGRX
, 1);
1850 EFX_SET_OWORD_FIELD(temp
, RST_EM
, 1);
1851 falcon_write(efx
, &temp
, GLB_CTL_REG_KER
);
1855 falcon_read(efx
, &temp
, GLB_CTL_REG_KER
);
1856 if (!EFX_OWORD_FIELD(temp
, RST_XGTX
) &&
1857 !EFX_OWORD_FIELD(temp
, RST_XGRX
) &&
1858 !EFX_OWORD_FIELD(temp
, RST_EM
)) {
1859 EFX_LOG(efx
, "Completed MAC reset after %d loops\n",
1864 EFX_ERR(efx
, "MAC reset failed\n");
1871 spin_unlock(&efx
->stats_lock
);
1873 /* If we've reset the EM block and the link is up, then
1874 * we'll have to kick the XAUI link so the PHY can recover */
1875 if (efx
->link_up
&& EFX_WORKAROUND_5147(efx
))
1876 falcon_reset_xaui(efx
);
1879 void falcon_deconfigure_mac_wrapper(struct efx_nic
*efx
)
1883 if (falcon_rev(efx
) < FALCON_REV_B0
)
1886 /* Isolate the MAC -> RX */
1887 falcon_read(efx
, &temp
, RX_CFG_REG_KER
);
1888 EFX_SET_OWORD_FIELD(temp
, RX_INGR_EN_B0
, 0);
1889 falcon_write(efx
, &temp
, RX_CFG_REG_KER
);
1892 falcon_drain_tx_fifo(efx
);
1895 void falcon_reconfigure_mac_wrapper(struct efx_nic
*efx
)
1901 if (efx
->link_options
& GM_LPA_10000
)
1903 else if (efx
->link_options
& GM_LPA_1000
)
1905 else if (efx
->link_options
& GM_LPA_100
)
1909 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1910 * as advertised. Disable to ensure packets are not
1911 * indefinitely held and TX queue can be flushed at any point
1912 * while the link is down. */
1913 EFX_POPULATE_OWORD_5(reg
,
1914 MAC_XOFF_VAL
, 0xffff /* max pause time */,
1916 MAC_UC_PROM
, efx
->promiscuous
,
1917 MAC_LINK_STATUS
, 1, /* always set */
1918 MAC_SPEED
, link_speed
);
1919 /* On B0, MAC backpressure can be disabled and packets get
1921 if (falcon_rev(efx
) >= FALCON_REV_B0
) {
1922 EFX_SET_OWORD_FIELD(reg
, TXFIFO_DRAIN_EN_B0
,
1926 falcon_write(efx
, ®
, MAC0_CTRL_REG_KER
);
1928 /* Restore the multicast hash registers. */
1929 falcon_set_multicast_hash(efx
);
1931 /* Transmission of pause frames when RX crosses the threshold is
1932 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1933 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
1934 tx_fc
= !!(efx
->flow_control
& EFX_FC_TX
);
1935 falcon_read(efx
, ®
, RX_CFG_REG_KER
);
1936 EFX_SET_OWORD_FIELD_VER(efx
, reg
, RX_XOFF_MAC_EN
, tx_fc
);
1938 /* Unisolate the MAC -> RX */
1939 if (falcon_rev(efx
) >= FALCON_REV_B0
)
1940 EFX_SET_OWORD_FIELD(reg
, RX_INGR_EN_B0
, 1);
1941 falcon_write(efx
, ®
, RX_CFG_REG_KER
);
1944 int falcon_dma_stats(struct efx_nic
*efx
, unsigned int done_offset
)
1950 if (disable_dma_stats
)
1953 /* Statistics fetch will fail if the MAC is in TX drain */
1954 if (falcon_rev(efx
) >= FALCON_REV_B0
) {
1956 falcon_read(efx
, &temp
, MAC0_CTRL_REG_KER
);
1957 if (EFX_OWORD_FIELD(temp
, TXFIFO_DRAIN_EN_B0
))
1961 dma_done
= (efx
->stats_buffer
.addr
+ done_offset
);
1962 *dma_done
= FALCON_STATS_NOT_DONE
;
1963 wmb(); /* ensure done flag is clear */
1965 /* Initiate DMA transfer of stats */
1966 EFX_POPULATE_OWORD_2(reg
,
1967 MAC_STAT_DMA_CMD
, 1,
1969 efx
->stats_buffer
.dma_addr
);
1970 falcon_write(efx
, ®
, MAC0_STAT_DMA_REG_KER
);
1972 /* Wait for transfer to complete */
1973 for (i
= 0; i
< 400; i
++) {
1974 if (*(volatile u32
*)dma_done
== FALCON_STATS_DONE
) {
1975 rmb(); /* Ensure the stats are valid. */
1981 EFX_ERR(efx
, "timed out waiting for statistics\n");
1985 /**************************************************************************
1987 * PHY access via GMII
1989 **************************************************************************
1992 /* Use the top bit of the MII PHY id to indicate the PHY type
1993 * (1G/10G), with the remaining bits as the actual PHY id.
1995 * This allows us to avoid leaking information from the mii_if_info
1996 * structure into other data structures.
1998 #define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR)
1999 #define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
2000 #define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1)
2001 #define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1)
2002 #define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1))
2005 /* Packing the clause 45 port and device fields into a single value */
2006 #define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
2007 #define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH
2008 #define MD_DEV_ADR_COMP_LBN 0
2009 #define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH
2012 /* Wait for GMII access to complete */
2013 static int falcon_gmii_wait(struct efx_nic
*efx
)
2015 efx_dword_t md_stat
;
2018 for (count
= 0; count
< 1000; count
++) { /* wait upto 10ms */
2019 falcon_readl(efx
, &md_stat
, MD_STAT_REG_KER
);
2020 if (EFX_DWORD_FIELD(md_stat
, MD_BSY
) == 0) {
2021 if (EFX_DWORD_FIELD(md_stat
, MD_LNFL
) != 0 ||
2022 EFX_DWORD_FIELD(md_stat
, MD_BSERR
) != 0) {
2023 EFX_ERR(efx
, "error from GMII access "
2025 EFX_DWORD_VAL(md_stat
));
2032 EFX_ERR(efx
, "timed out waiting for GMII\n");
/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
			      int addr, int value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
	efx_oword_t reg;

	/* The 'generic' prt/dev packing in mdio_10g.h is conveniently
	 * chosen so that the only current user, Falcon, can take the
	 * packed value and use it directly.
	 * Fail to build if this assumption is broken.
	 */
	BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
	BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
	BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
	BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);

	if (phy_id2 == PHY_ADDR_INVALID)
		return;

	/* See falcon_mdio_read for an explanation. */
	if (!(phy_id & FALCON_PHY_ID_10G)) {
		int mmd = ffs(efx->phy_op->mmds) - 1;
		EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
		phy_id2 = mdio_clause45_pack(phy_id2, mmd)
			& FALCON_PHY_ID_ID_MASK;
	}

	EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
		    addr, value);

	spin_lock_bh(&efx->phy_lock);

	/* Check MII not currently being accessed */
	if (falcon_gmii_wait(efx) != 0)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

	EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
	falcon_write(efx, &reg, MD_ID_REG_KER);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
	falcon_write(efx, &reg, MD_TXD_REG_KER);

	EFX_POPULATE_OWORD_2(reg,
			     MD_WRC, 1,
			     MD_GC, 0);
	falcon_write(efx, &reg, MD_CS_REG_KER);

	/* Wait for data to be written */
	if (falcon_gmii_wait(efx) != 0) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     MD_WRC, 0,
				     MD_GC, 1);
		falcon_write(efx, &reg, MD_CS_REG_KER);
		udelay(10);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);
}
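/* The register sequence above implements one MDIO transaction: the
 * PHY register address goes to MD_PHY_ADR_REG_KER, the packed
 * port/device id to MD_ID_REG_KER and the data word to
 * MD_TXD_REG_KER; writing MD_WRC to the command/status register
 * (MD_CS_REG_KER) then kicks off the transfer, which is polled to
 * completion through MD_STAT_REG_KER in falcon_gmii_wait(). */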
/* Reads a GMII register from a PHY connected to Falcon.  If no value
 * could be read, -1 will be returned. */
static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
	efx_oword_t reg;
	int value = -1;

	if (phy_addr == PHY_ADDR_INVALID)
		return -1;

	/* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
	 * but the generic Linux code does not make any distinction or have
	 * any state for this.
	 * We spot the case where someone tried to talk 22 to a 45 PHY and
	 * redirect the request to the lowest numbered MMD as a clause45
	 * request. This is enough to allow simple queries like id and link
	 * state to succeed. TODO: We may need to do more in future.
	 */
	if (!(phy_id & FALCON_PHY_ID_10G)) {
		int mmd = ffs(efx->phy_op->mmds) - 1;
		EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
		phy_addr = mdio_clause45_pack(phy_addr, mmd)
			& FALCON_PHY_ID_ID_MASK;
	}

	spin_lock_bh(&efx->phy_lock);

	/* Check MII not currently being accessed */
	if (falcon_gmii_wait(efx) != 0)
		goto out;

	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

	EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
	falcon_write(efx, &reg, MD_ID_REG_KER);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
	falcon_write(efx, &reg, MD_CS_REG_KER);

	/* Wait for data to become available */
	value = falcon_gmii_wait(efx);
	if (value == 0) {
		falcon_read(efx, &reg, MD_RXD_REG_KER);
		value = EFX_OWORD_FIELD(reg, MD_RXD);
		EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
			    phy_id, addr, value);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     MD_RIC, 0,
				     MD_GC, 1);
		falcon_write(efx, &reg, MD_CS_REG_KER);

		EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
			"error %d\n", phy_id, addr, value);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);

	return value;
}
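/* Example of the clause 22 fix-up above: a clause 22 read of phy_id 2
 * against a 10G PHY whose lowest-numbered MMD is, say, 1 (PMA/PMD) is
 * repacked by mdio_clause45_pack() as a clause 45 read of port 2,
 * device 1 before it is put on the wire. */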
static void falcon_init_mdio(struct mii_if_info *gmii)
{
	gmii->mdio_read = falcon_mdio_read;
	gmii->mdio_write = falcon_mdio_write;
	gmii->phy_id_mask = FALCON_PHY_ID_MASK;
	gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
}
static int falcon_probe_phy(struct efx_nic *efx)
{
	switch (efx->phy_type) {
	case PHY_TYPE_10XPRESS:
		efx->phy_op = &falcon_tenxpress_phy_ops;
		break;
	case PHY_TYPE_XFP:
		efx->phy_op = &falcon_xfp_phy_ops;
		break;
	default:
		EFX_ERR(efx, "Unknown PHY type %d\n",
			efx->phy_type);
		return -1;
	}

	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
	return 0;
}
/* This call is responsible for hooking in the MAC and PHY operations */
int falcon_probe_port(struct efx_nic *efx)
{
	int rc;

	/* Hook in PHY operations table */
	rc = falcon_probe_phy(efx);
	if (rc)
		return rc;

	/* Set up GMII structure for PHY */
	efx->mii.supports_gmii = true;
	falcon_init_mdio(&efx->mii);

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx->flow_control = EFX_FC_RX | EFX_FC_TX;
	else
		efx->flow_control = EFX_FC_RX;

	/* Allocate buffer for stats */
	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
				 FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
		(unsigned long long)efx->stats_buffer.dma_addr,
		efx->stats_buffer.addr,
		virt_to_phys(efx->stats_buffer.addr));

	return 0;
}

void falcon_remove_port(struct efx_nic *efx)
{
	falcon_free_buffer(efx, &efx->stats_buffer);
}
/**************************************************************************
 *
 * Multicast filtering
 *
 **************************************************************************
 */

void falcon_set_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	/* Broadcast packets go through the multicast hash filter.
	 * ether_crc_le() of the broadcast address is 0xbe2612ff, whose
	 * low byte is 0xff, so we always set bit 0xff in the mask.
	 */
	set_bit_le(0xff, mc_hash->byte);

	falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
	falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
}
/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + NVCONFIG_OFFSET;

	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
	mutex_lock(&efx->spi_lock);
	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&efx->spi_lock);
	if (rc) {
		EFX_ERR(efx, "Failed to read %s\n",
			efx->spi_flash ? "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
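/* Note on the checksum scheme used above: an image is accepted only
 * when its 16-bit little-endian words sum to 0xffff modulo 2^16,
 * which is exactly what the "~csum & 0xffff" test rejects. */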
/* Registers tested in the falcon register test */
static struct {
	unsigned address;
	efx_oword_t mask;
} efx_test_registers[] = {
	{ ADR_REGION_REG_KER,
	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
	{ RX_CFG_REG_KER,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ TX_CFG_REG_KER,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ TX_CFG2_REG_KER,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ MAC0_CTRL_REG_KER,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ SRM_TX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_PF_WM_REG_KER,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ DP_CTRL_REG,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_GLB_CFG_REG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_TX_CFG_REG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_CFG_REG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_PARAM_REG,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_FC_REG,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_ADR_LO_REG,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ XX_SD_CTL_REG,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
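/* Each entry above pairs a register address with a mask of its
 * testable read/write bits; falcon_test_registers() below sets and
 * clears each masked bit in isolation and verifies that it reads
 * back through the hardware. */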
/* Returns true if a and b differ in any bit covered by mask */
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int falcon_test_registers(struct efx_nic *efx)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
		address = efx_test_registers[i].address;
		mask = imask = efx_test_registers[i].mask;
		EFX_INVERT_OWORD(imask);

		falcon_read(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			falcon_write(efx, &reg, address);
			falcon_read(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			falcon_write(efx, &reg, address);
			falcon_read(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		falcon_write(efx, &original, address);
	}

	return 0;

fail:
	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	EFX_LOG(efx, "performing hardware reset (%d)\n", method);

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to backup PCI state of primary "
				"function prior to hardware reset\n");
			goto fail1;
		}
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to backup PCI state of "
					"secondary function prior to "
					"hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     EXT_PHY_RST_DUR, 0x7,
				     SWRST, 1);
	} else {
		int reset_phy = (method == RESET_TYPE_INVISIBLE ?
				 EXCLUDE_FROM_RESET : 0);

		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     EXT_PHY_RST_CTL, reset_phy,
				     PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
				     PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
				     PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
				     EE_RST_CTL, EXCLUDE_FROM_RESET,
				     EXT_PHY_RST_DUR, 0x7 /* 10ms */,
				     SWRST, 1);
	}
	falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);

	EFX_LOG(efx, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to restore PCI config for "
					"the secondary function\n");
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to restore PCI config for the "
				"primary function\n");
			goto fail4;
		}
		EFX_LOG(efx, "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
		rc = -ETIMEDOUT;
		EFX_ERR(efx, "timed out waiting for hardware reset\n");
		goto fail5;
	}
	EFX_LOG(efx, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
fail3:
	pci_restore_state(efx->pci_dev);
fail1:
fail4:
fail5:
	return rc;
}
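/* Summary of the sequence above: for RESET_TYPE_WORLD the PCI config
 * space of both functions is saved, GLB_CTL is written to trigger the
 * reset, the driver sleeps for 50ms (HZ / 20), config space is
 * restored, and SWRST is read back to confirm completion. */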
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
	falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     SRAM_OOB_BT_INIT_EN, 1,
			     SRM_NUM_BANKS_AND_BANK_SIZE, 0);
	falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
			EFX_LOG(efx, "SRAM reset complete\n");
			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
static int falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device **spi_device_ret,
				  unsigned int device_id, u32 device_type)
{
	struct efx_spi_device *spi_device;

	if (device_type != 0) {
		spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
		if (!spi_device)
			return -ENOMEM;
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
		spi_device->efx = efx;
	} else {
		spi_device = NULL;
	}

	kfree(*spi_device_ret);
	*spi_device_ret = spi_device;
	return 0;
}
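/* For example, the flash device type built in falcon_probe_spi_devices()
 * below decodes as: size field 17 -> 1 << 17 = 128 KB, addr_len 3 bytes,
 * erase command 0x52, erase size field 15 -> 1 << 15 = 32 KB, block size
 * field 8 -> 1 << 8 = 256 B, matching the Atmel AT25F1024 it describes. */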
static void falcon_remove_spi_devices(struct efx_nic *efx)
{
	kfree(efx->spi_eeprom);
	efx->spi_eeprom = NULL;
	kfree(efx->spi_flash);
	efx->spi_flash = NULL;
}
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
	int board_rev;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mii.phy_id = PHY_ADDR_INVALID;
		board_rev = 0;
		rc = 0;
	} else if (rc) {
		goto fail1;
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;

		efx->phy_type = v2->port0_phy_type;
		efx->mii.phy_id = v2->port0_phy_addr;
		board_rev = le16_to_cpu(v2->board_revision);

		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
			__le32 fl = v3->spi_device_type[EE_SPI_FLASH];
			__le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
			rc = falcon_spi_device_init(efx, &efx->spi_flash,
						    EE_SPI_FLASH,
						    le32_to_cpu(fl));
			if (rc)
				goto fail2;
			rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
						    EE_SPI_EEPROM,
						    le32_to_cpu(ee));
			if (rc)
				goto fail2;
		}
	}

	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);

	efx_set_board_info(efx, board_rev);

	kfree(nvconfig);
	return 0;

 fail2:
	falcon_remove_spi_devices(efx);
 fail1:
	kfree(nvconfig);
	return rc;
}
/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
 * count, port speed).  Set workaround and feature flags accordingly.
 */
static int falcon_probe_nic_variant(struct efx_nic *efx)
{
	efx_oword_t altera_build;

	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
	if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
		EFX_ERR(efx, "Falcon FPGA not supported\n");
		return -ENODEV;
	}

	switch (falcon_rev(efx)) {
	case FALCON_REV_A0:
	case 0xff:
		EFX_ERR(efx, "Falcon rev A0 not supported\n");
		return -ENODEV;

	case FALCON_REV_A1:{
		efx_oword_t nic_stat;

		falcon_read(efx, &nic_stat, NIC_STAT_REG);

		if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
			return -ENODEV;
		}
		if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
			EFX_ERR(efx, "1G mode not supported\n");
			return -ENODEV;
		}
		break;
	}

	case FALCON_REV_B0:
		break;

	default:
		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
		return -ENODEV;
	}

	return 0;
}
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	bool has_flash, has_eeprom, boot_is_external;

	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
	falcon_read(efx, &nic_stat, NIC_STAT_REG);
	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);

	has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
	has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
	boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);

	if (has_flash) {
		/* Default flash SPI device: Atmel AT25F1024
		 * 128 KB, 24-bit address, 32 KB erase block,
		 * 256 B write block
		 */
		u32 flash_device_type =
			(17 << SPI_DEV_TYPE_SIZE_LBN)
			| (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
			| (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
			| (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
			| (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);

		falcon_spi_device_init(efx, &efx->spi_flash,
				       EE_SPI_FLASH, flash_device_type);

		if (!boot_is_external) {
			/* Disable VPD and set clock dividers to safe
			 * values for initial programming.
			 */
			EFX_LOG(efx, "Booted from internal ASIC settings;"
				" setting SPI config\n");
			EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
					     /* 125 MHz / 7 ~= 18 MHz */
					     EE_SF_CLOCK_DIV, 7,
					     /* 125 MHz / 63 ~= 2 MHz */
					     EE_EE_CLOCK_DIV, 63);
			falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
		}
	}

	if (has_eeprom) {
		u32 eeprom_device_type;

		/* If it has no flash, it must have a large EEPROM
		 * for chip config; otherwise check whether 9-bit
		 * addressing is used for VPD configuration
		 */
		if (has_flash &&
		    (!boot_is_external ||
		     EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
			/* Default SPI device: Atmel AT25040 or similar
			 * 512 B, 9-bit address, 8 B write block
			 */
			eeprom_device_type =
				(9 << SPI_DEV_TYPE_SIZE_LBN)
				| (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
				| (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
		} else {
			/* "Large" SPI device: Atmel AT25640 or similar
			 * 8 KB, 16-bit address, 32 B write block
			 */
			eeprom_device_type =
				(13 << SPI_DEV_TYPE_SIZE_LBN)
				| (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
				| (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
		}

		falcon_spi_device_init(efx, &efx->spi_eeprom,
				       EE_SPI_EEPROM, eeprom_device_type);
	}

	EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
		(has_flash ? "present" : "absent"),
		(has_eeprom ? "present" : "absent"));
}
int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* Determine number of ports etc. */
	rc = falcon_probe_nic_variant(efx);
	if (rc)
		goto fail1;

	/* Probe secondary function if expected */
	if (FALCON_IS_DUAL_FUNC(efx)) {
		struct pci_dev *dev = pci_dev_get(efx->pci_dev);

		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			EFX_ERR(efx, "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		EFX_ERR(efx, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
		(unsigned long long)efx->irq_status.dma_addr,
		efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

	/* Initialise I2C adapter */
	efx->i2c_adap.owner = THIS_MODULE;
	nic_data->i2c_data = falcon_i2c_bit_operations;
	nic_data->i2c_data.data = efx;
	efx->i2c_adap.algo_data = &nic_data->i2c_data;
	efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
	rc = i2c_bit_add_bus(&efx->i2c_adap);
	if (rc)
		goto fail5;

	return 0;

 fail5:
	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	unsigned thresh;
	int rc;

	/* Use on-chip SRAM */
	falcon_read(efx, &temp, NIC_STAT_REG);
	EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
	falcon_write(efx, &temp, NIC_STAT_REG);

	/* Set buffer table mode */
	EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
	falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
	falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
	EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
	falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	falcon_write(efx, &temp, TX_DC_CFG_REG_KER);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
	EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		falcon_read(efx, &temp, SPARE_REG_KER);
		EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
		falcon_write(efx, &temp, SPARE_REG_KER);
	}

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     ILL_ADR_INT_KER_EN, 1,
			     RBUF_OWN_INT_KER_EN, 1,
			     TBUF_OWN_INT_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	falcon_write(efx, &temp, FATAL_INTR_REG_KER);

	if (EFX_WORKAROUND_7244(efx)) {
		falcon_read(efx, &temp, RX_FILTER_CTL_REG);
		EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
		falcon_write(efx, &temp, RX_FILTER_CTL_REG);
	}

	falcon_setup_rss_indir_table(efx);

	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
	EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
	falcon_write(efx, &temp, RX_SELF_RST_REG_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	falcon_read(efx, &temp, TX_CFG2_REG_KER);
	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
	falcon_write(efx, &temp, TX_CFG2_REG_KER);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	falcon_read(efx, &temp, TX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
	falcon_write(efx, &temp, TX_CFG_REG_KER);

	/* RX config */
	falcon_read(efx, &temp, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
	if (EFX_WORKAROUND_7575(efx))
		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
					(3 * 4096) / 32);
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);

	/* RX FIFO flow control thresholds */
	thresh = ((rx_xon_thresh_bytes >= 0) ?
		  rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
	thresh = ((rx_xoff_thresh_bytes >= 0) ?
		  rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
	/* RX control FIFO thresholds [32 entries] */
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
	falcon_write(efx, &temp, RX_CFG_REG_KER);

	/* Set destination of both TX and RX Flush events */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
		falcon_write(efx, &temp, DP_CTRL_REG);
	}

	return 0;
}
void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = i2c_del_adapter(&efx->i2c_adap);
	BUG_ON(rc);

	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);

	falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
void falcon_update_nic_stats(struct efx_nic *efx)
{
	efx_oword_t cnt;

	falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
	efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
}
/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c
 *
 **************************************************************************
 */

struct efx_nic_type falcon_a_nic_type = {
	.mem_bar = 2,
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
	.buf_tbl_base = BUF_TBL_KER_A1,
	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
	.txd_ring_mask = FALCON_TXD_RING_MASK,
	.rxd_ring_mask = FALCON_RXD_RING_MASK,
	.evq_size = FALCON_EVQ_SIZE,
	.max_dma_mask = FALCON_DMA_MASK,
	.tx_dma_mask = FALCON_TX_DMA_MASK,
	.bug5391_mask = 0xf,
	.rx_xoff_thresh = 2048,
	.rx_xon_thresh = 512,
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
};

struct efx_nic_type falcon_b_nic_type = {
	.mem_bar = 2,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map the MSI-X table or MSI-X PBA, since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
	.buf_tbl_base = BUF_TBL_KER_B0,
	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
	.txd_ring_mask = FALCON_TXD_RING_MASK,
	.rxd_ring_mask = FALCON_RXD_RING_MASK,
	.evq_size = FALCON_EVQ_SIZE,
	.max_dma_mask = FALCON_DMA_MASK,
	.tx_dma_mask = FALCON_TX_DMA_MASK,
	.bug5391_mask = 0,
	.rx_xoff_thresh = 54272, /* ~80KB - 3*max MTU */
	.rx_xon_thresh = 27648, /* ~3*max MTU */
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
};