/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "mac.h"
#include "spi.h"
#include "falcon.h"
#include "regs.h"
#include "io.h"
#include "mdio_10g.h"
#include "phy.h"
#include "workarounds.h"

/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1
#define TX_DC_BASE 0x130000

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
#define RX_DC_BASE 0x100000

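/* Illustrative note: the *_ENTRIES_ORDER values are consistent with an
 * entries = 8 << order encoding (16 = 8 << 1 for TX, 64 = 8 << 3 for RX);
 * this is inferred from the define pairs, not taken from the datasheet. */
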
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));

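/* Worked decoding of the fields above (inferred from the device comments,
 * not from the datasheet): SIZE, ERASE_SIZE and BLOCK_SIZE hold log2 of a
 * byte count, so 13 -> 8 KB and 5 -> 32 B for the EEPROM, and 17 -> 128 KB,
 * 15 -> 32 KB erase block and 8 -> 256 B write block for the flash;
 * ADDR_LEN is the address length in bytes (2 -> 16-bit, 3 -> 24-bit), and
 * 0x52 is the AT25F1024 sector-erase opcode. */
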
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A)
 * This also has an effect on RX/TX arbitration
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");

/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100

/**************************************************************************
 *
 * Falcon constants
 *
 **************************************************************************
 */

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define FALCON_RX_FLUSH_COUNT 4

#define FALCON_IS_DUAL_FUNC(efx)		\
	(falcon_rev(efx) < FALCON_REV_B0)

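/* Rev-A parts expose a second PCI function; nic_data->pci_dev2, used by
 * falcon_fatal_interrupt() below, is that function, which is why both
 * must have bus mastering disabled on a fatal error. */
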
/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/

static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
					unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}

/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */

static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};

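/* DIV_ROUND_UP(HZ, 20) is 1/20 s expressed in jiffies, i.e. the 50 ms
 * noted above; with HZ=250, for example, it evaluates to 13 jiffies
 * (52 ms). */
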
/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		falcon_write_buf_tbl(efx, &buf_desc, index);
	}
}

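/* Example: a 16 KB event queue occupies four buffer table entries; entry
 * i describes the 4 KB page at dma_addr + i * 4096, and
 * FRF_AZ_BUF_ADR_FBUF stores dma_addr >> 12, i.e. its page number. */
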
/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * Falcon TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb();	/* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}

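/* write_count and insert_count are free-running counters; masking with
 * EFX_TXQ_MASK maps them onto the ring, so e.g. with a 1024-entry ring
 * a write_count of 1030 addresses slot 6. */
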
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}

static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * Falcon RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}

int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}

static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}

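/* The masked subtraction above counts descriptors completed by one
 * batched event even across a ring wrap: with EFX_TXQ_MASK = 1023,
 * read_count = 1020 and a descriptor pointer of 4,
 * (4 - 1020) & 1023 = 8 descriptors were retired. */
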
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed =
			efx->rx_checksum_enabled &&
			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}

	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	if (falcon_rev(efx) <= FALCON_REV_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}

static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}

int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			falcon_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			falcon_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}

void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}

/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return falcon_alloc_special_buffer(efx, &channel->eventq,
					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	falcon_set_int_moderation(channel);
}

void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}

/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic;
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}

void falcon_sim_phy_event(struct efx_nic *efx)
{
	efx_qword_t phy_event;

	EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_GLOBAL_EV);
	if (EFX_IS10G(efx))
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
	else
		EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);

	falcon_generate_event(&efx->channel[0], &phy_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	falcon_prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_tx_queue(tx_queue, efx)
		falcon_flush_tx_queue(tx_queue);

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_queue->flushed == FLUSH_PENDING)
				++rx_pending;
		}
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_pending == FALCON_RX_FLUSH_COUNT)
				break;
			if (rx_queue->flushed == FLUSH_FAILED ||
			    rx_queue->flushed == FLUSH_NONE) {
				falcon_flush_rx_queue(rx_queue);
				++rx_pending;
			}
		}
		efx_for_each_tx_queue(tx_queue, efx) {
			if (tx_queue->flushed != FLUSH_DONE)
				++tx_pending;
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (tx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = FLUSH_DONE;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (rx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = FLUSH_DONE;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}

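/* Worst case, the loop above polls FALCON_FLUSH_POLL_COUNT (100) times at
 * FALCON_FLUSH_INTERVAL (10 ms) intervals, i.e. roughly one second, before
 * the queues are marked flushed regardless. */
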
/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	 * date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}

/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}

/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	u32 queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}

/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}

void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)

static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}

static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}

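/* Example: with a 256 B write block and start = 250, the device will
 * accept 256 - (250 & 255) = 6 more bytes before the block boundary,
 * so the limit is min(FALCON_SPI_MAX_LEN, 6) = 6. */
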
static u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}

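/* Small EEPROMs fold address bit 8 into bit 3 of the opcode: with
 * munge_address = 1 and address 0x123, (0x123 >> 8) & 1 = 1 is shifted
 * to bit 3 and OR'd into the command byte. */
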
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

			for (count = 0; count < 10000; count++) {
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	falcon_stop_nic_stats(efx);

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_state.up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	falcon_start_nic_stats(efx);

	return 0;
}

void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	if (!efx->link_state.up)
		falcon_drain_tx_fifo(efx);
}

void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_set_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
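
/* MAC statistics are fetched by DMA from the MAC into stats_buffer.
 * The handshake below is, roughly: clear the done word to
 * FALCON_STATS_NOT_DONE, issue a wmb() so the hardware cannot observe
 * a stale "done" value, kick off the DMA via FR_AB_MAC_STAT_DMA, and
 * later check for FALCON_STATS_DONE (with a matching rmb() before the
 * stats themselves are read).  The timer armed at the end is a
 * backstop in case the DMA never completes. */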
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = 0;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	} else {
		EFX_ERR(efx, "timed out waiting for statistics\n");
	}
}
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				EFX_ERR(efx, "error from GMII access "
					EFX_OWORD_FMT"\n",
					EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	EFX_ERR(efx, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
/* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	spin_lock_bh(&efx->phy_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);
	return rc;
}
/* Read an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	spin_lock_bh(&efx->phy_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
			    prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
			prtad, devad, addr, rc);
	}

 out:
	spin_unlock_bh(&efx->phy_lock);
	return rc;
}
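
/* These two functions are hooked into efx->mdio (a struct
 * mdio_if_info) in falcon_probe_port() below, so PHY code can use the
 * generic mdio45 helpers.  Illustrative only - for example, reading
 * the PMA/PMD status register of the attached PHY might look like:
 *
 *	int stat = efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad,
 *				       MDIO_MMD_PMAPMD, MDIO_STAT1);
 */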
static void falcon_clock_mac(struct efx_nic *efx)
{
	unsigned strap_val;
	efx_oword_t nic_stat;

	/* Configure the NIC generated MAC clock correctly */
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	strap_val = EFX_IS10G(efx) ? 5 : 3;
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
		efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
	} else {
		/* Falcon A1 does not support 1G/10G speed switching
		 * and must not be used with a PHY that does. */
		BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
		       strap_val);
	}
}
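
/* Switching between the GMAC and XMAC requires the MAC clock to be
 * re-strapped (falcon_clock_mac() above: strap value 3 selects the 1G
 * clock, 5 the 10G clock) and the newly selected MAC to be reset.  On
 * B0 the strap can be overridden through NIC_STAT; on A1 it is fixed
 * by the board straps, hence the BUG_ON above. */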
int falcon_switch_mac(struct efx_nic *efx)
{
	struct efx_mac_operations *old_mac_op = efx->mac_op;
	struct falcon_nic_data *nic_data = efx->nic_data;
	unsigned int stats_done_offset;
	int rc = 0;

	/* Don't try to fetch MAC stats while we're switching MACs */
	falcon_stop_nic_stats(efx);

	/* Internal loopbacks override the phy speed setting */
	if (efx->loopback_mode == LOOPBACK_GMAC) {
		efx->link_state.speed = 1000;
		efx->link_state.fd = true;
	} else if (LOOPBACK_INTERNAL(efx)) {
		efx->link_state.speed = 10000;
		efx->link_state.fd = true;
	}

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	efx->mac_op = (EFX_IS10G(efx) ?
		       &falcon_xmac_operations : &falcon_gmac_operations);

	if (EFX_IS10G(efx))
		stats_done_offset = XgDmaDone_offset;
	else
		stats_done_offset = GDmaDone_offset;
	nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;

	if (old_mac_op == efx->mac_op)
		goto out;

	falcon_clock_mac(efx);

	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
	/* Not all macs support a mac-level link state */
	efx->xmac_poll_required = false;

	rc = falcon_reset_macs(efx);
 out:
	falcon_start_nic_stats(efx);
	return rc;
}
/* This call is responsible for hooking in the MAC and PHY operations */
int falcon_probe_port(struct efx_nic *efx)
{
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_SFT9001A:
	case PHY_TYPE_SFT9001B:
		efx->phy_op = &falcon_sft9001_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	default:
		EFX_ERR(efx, "Unknown PHY type %d\n",
			efx->phy_type);
		return -ENODEV;
	}

	if (efx->phy_op->macs & EFX_XMAC)
		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
					(1 << LOOPBACK_XGXS) |
					(1 << LOOPBACK_XAUI));
	if (efx->phy_op->macs & EFX_GMAC)
		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
	efx->loopback_modes |= efx->phy_op->loopbacks;

	/* Set up MDIO structure for PHY */
	efx->mdio.mmds = efx->phy_op->mmds;
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;

	/* Allocate buffer for stats */
	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
				 FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
		(u64)efx->stats_buffer.dma_addr,
		efx->stats_buffer.addr,
		(u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}

void falcon_remove_port(struct efx_nic *efx)
{
	falcon_free_buffer(efx, &efx->stats_buffer);
}
/**************************************************************************
 *
 * Multicast filtering
 *
 **************************************************************************
 */

void falcon_set_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	/* Broadcast packets go through the multicast hash filter.
	 * ether_crc_le() of the broadcast address is 0xbe2612ff
	 * so we always add bit 0xff to the mask.
	 */
	set_bit_le(0xff, mc_hash->byte);

	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
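
/* The multicast hash is a 256-bit vector split across the two 128-bit
 * registers written above; a destination address selects one bit via
 * the low 8 bits of its little-endian CRC-32 (hence bit 0xff for the
 * broadcast address, as noted in the comment above). */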
/**************************************************************************
 *
 * Falcon test code
 *
 **************************************************************************/

int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
	if (!spi)
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&efx->spi_lock);
	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&efx->spi_lock);
	if (rc) {
		EFX_ERR(efx, "Failed to read %s\n",
			efx->spi_flash ? "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
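
/* Each entry in the table below pairs a register address with a mask
 * of the bits that falcon_test_registers() may safely flip; bits
 * outside the mask are left alone and ignored when readback values
 * are compared. */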
/* Registers tested in the falcon register test */
static struct {
	unsigned address;
	efx_oword_t mask;
} efx_test_registers[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
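
/* The register test sweeps every maskable bit: each bit is first set
 * in isolation (all other writable bits clear) and then cleared in
 * isolation (all other writable bits set), with the readback compared
 * under the mask after each write.  The original register contents
 * are restored afterwards. */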
int falcon_test_registers(struct efx_nic *efx)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
		address = efx_test_registers[i].address;
		mask = imask = efx_test_registers[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Device reset
 *
 **************************************************************************
 */

/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep. */
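/* Note on reset scope (as implemented below): an "invisible" reset
 * leaves the PHY out of the reset, other methods reset it too, and a
 * WORLD reset additionally resets the PCIe core, so PCI config space
 * must be saved and restored around it - for both functions on
 * dual-function (pre-B0) boards. */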
int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to backup PCI state of primary "
				"function prior to hardware reset\n");
			goto fail1;
		}
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to backup PCI state of "
					"secondary function prior to "
					"hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	EFX_LOG(efx, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to restore PCI config for "
					"the secondary function\n");
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to restore PCI config for the "
				"primary function\n");
			goto fail4;
		}
		EFX_LOG(efx, "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		EFX_ERR(efx, "timed out waiting for hardware reset\n");
		goto fail5;
	}
	EFX_LOG(efx, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
fail3:
	pci_restore_state(efx->pci_dev);
fail1:
fail4:
fail5:
	return rc;
}
void falcon_monitor(struct efx_nic *efx)
{
	int rc;

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
			(rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		falcon_sim_phy_event(efx);
	}

	efx->phy_op->poll(efx);
	if (EFX_IS10G(efx))
		falcon_poll_xmac(efx);
}
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			EFX_LOG(efx, "SRAM reset complete\n");
			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
static int falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device **spi_device_ret,
				  unsigned int device_id, u32 device_type)
{
	struct efx_spi_device *spi_device;

	if (device_type != 0) {
		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
		if (!spi_device)
			return -ENOMEM;
		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
		spi_device->efx = efx;
	} else {
		spi_device = NULL;
	}

	kfree(*spi_device_ret);
	*spi_device_ret = spi_device;
	return 0;
}
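
/* The device-type word packs log2-encoded geometry fields, decoded by
 * the shifts above: e.g. a SIZE field of 10 would describe a
 * 1 << 10 = 1 KB part, and a BLOCK_SIZE field of 8 a 256-byte write
 * block.  A type word of 0 means "no device present". */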
static void falcon_remove_spi_devices(struct efx_nic *efx)
{
	kfree(efx->spi_eeprom);
	efx->spi_eeprom = NULL;
	kfree(efx->spi_flash);
	efx->spi_flash = NULL;
}
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
	int board_rev;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
		board_rev = 0;
		rc = 0;
	} else if (rc) {
		goto fail1;
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;

		efx->phy_type = v2->port0_phy_type;
		efx->mdio.prtad = v2->port0_phy_addr;
		board_rev = le16_to_cpu(v2->board_revision);

		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
			rc = falcon_spi_device_init(
				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_FLASH]));
			if (rc)
				goto fail2;
			rc = falcon_spi_device_init(
				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
				le32_to_cpu(v3->spi_device_type
					    [FFE_AB_SPI_DEVICE_EEPROM]));
			if (rc)
				goto fail2;
		}
	}

	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);

	falcon_probe_board(efx, board_rev);

	kfree(nvconfig);
	return 0;

 fail2:
	falcon_remove_spi_devices(efx);
 fail1:
	kfree(nvconfig);
	return rc;
}
/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
 * count, port speed).  Set workaround and feature flags accordingly.
 */
static int falcon_probe_nic_variant(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_oword_t nic_stat;

	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
		EFX_ERR(efx, "Falcon FPGA not supported\n");
		return -ENODEV;
	}

	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);

	switch (falcon_rev(efx)) {
	case FALCON_REV_A0:
	case 0xff:
		EFX_ERR(efx, "Falcon rev A0 not supported\n");
		return -ENODEV;

	case FALCON_REV_A1:
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
			return -ENODEV;
		}
		break;

	case FALCON_REV_B0:
		break;

	default:
		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
		return -ENODEV;
	}

	/* Initial assumed speed */
	efx->link_state.speed =
		EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;

	return 0;
}
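
/* GPIO3's power-up value indicates whether the board straps selected
 * SPI boot at all; if so, the SF_PRST (flash present) strap picks
 * flash over EEPROM.  Otherwise the SPI clocks are programmed to
 * conservative rates before first use, as below. */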
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		EFX_LOG(efx, "Booted from %s\n",
			boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		EFX_LOG(efx, "Booted from internal ASIC settings;"
			" setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &efx->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &efx->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
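
/* Probe order matters below: the NIC variant must be known before the
 * secondary PCI function is looked up, and the NIC must be reset
 * before SRAM and NVRAM are touched; the fail labels unwind in strict
 * reverse order of acquisition. */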
int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* Determine number of ports etc. */
	rc = falcon_probe_nic_variant(efx);
	if (rc)
		goto fail1;

	/* Probe secondary function if expected */
	if (FALCON_IS_DUAL_FUNC(efx)) {
		struct pci_dev *dev = pci_dev_get(efx->pci_dev);

		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			EFX_ERR(efx, "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		EFX_ERR(efx, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
		(u64)efx->irq_status.dma_addr,
		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		EFX_ERR(efx, "failed to initialise board\n");
		goto fail6;
	}

	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
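
/* The rx_xon/xoff_thresh_bytes module parameters are applied below in
 * 256-byte units; the default of -1 selects per-revision values
 * (e.g. on B0 the XON default is 27648 bytes, roughly three
 * maximum-MTU frames of the 80K data FIFO). */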
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
	/* Prior to Siena the RX DMA engine will split each frame at
	 * intervals of RX_USR_BUF_SIZE (32-byte units).  We set it to
	 * be so large that that never happens. */
	const unsigned huge_buf_size = (3 * 4096) >> 5;
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	/* RX data FIFO thresholds (256-byte units; size varies) */
	int data_xon_thr = rx_xon_thresh_bytes >> 8;
	int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	if (falcon_rev(efx) <= FALCON_REV_A1) {
		/* Data FIFO size is 5.5K */
		if (data_xon_thr < 0)
			data_xon_thr = 512 >> 8;
		if (data_xoff_thr < 0)
			data_xoff_thr = 2048 >> 8;
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		if (data_xon_thr < 0)
			data_xon_thr = 27648 >> 8; /* ~3*max MTU */
		if (data_xoff_thr < 0)
			data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    huge_buf_size);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	}
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
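
/* Descriptor cache sizes are programmed as an order rather than an
 * entry count: the BUILD_BUG_ONs below enforce ENTRIES == 8 << ORDER,
 * so, e.g., an order of 1 means a 16-entry cache. */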
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* Set the source of the GMAC clock */
	if (falcon_rev(efx) == FALCON_REV_B0) {
		efx_reado(efx, &temp, FR_AB_GPIO_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
		efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
	}

	/* Select the correct MAC */
	falcon_clock_mac(efx);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	falcon_setup_rss_indir_table(efx);

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	/* Set destination of both TX and RX Flush events */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	return 0;
}
void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);
	int rc;

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&board->i2c_adap);
	BUG_ON(rc);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);

	falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
void falcon_update_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t cnt;

	if (nic_data->stats_disable_count)
		return;

	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

	if (nic_data->stats_pending &&
	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb(); /* read the done flag before the stats */
		efx->mac_op->update_stats(efx);
	}
}
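
/* stats_disable_count is a nesting count: falcon_stop_nic_stats() and
 * falcon_start_nic_stats() are called in pairs from several paths
 * (MAC switch, MAC reset), and the periodic stats DMA is only
 * re-armed when the count returns to zero. */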
void falcon_start_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}
void falcon_stop_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c
 *
 **************************************************************************
 */

struct efx_nic_type falcon_a_nic_type = {
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
};

struct efx_nic_type falcon_b_nic_type = {
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32,	/* Hardware limit is 64, but the legacy
					 * interrupt handler only supports 32