/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "workarounds.h"
/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
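/* Note that the SIZE, ERASE_SIZE and BLOCK_SIZE fields above are log2
 * values: 13 -> 2^13 = 8 KB EEPROM with a 2^5 = 32 B write block, and
 * 17 -> 2^17 = 128 KB flash with a 2^15 = 32 KB erase block and
 * 2^8 = 256 B write block, matching the part descriptions above. */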
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this watermark,
 * send XOFF.  Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this watermark,
 * send XON.  Only used if TX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");

/* If FALCON_MAX_INT_ERRORS internal errors occur within
 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100
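/* Worst case a flush is therefore polled for FALCON_FLUSH_POLL_COUNT *
 * FALCON_FLUSH_INTERVAL = 100 * 10 ms = 1 s before being given up on. */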
/**************************************************************************
 *
 **************************************************************************
 */

/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define FALCON_RX_FLUSH_COUNT 4

#define FALCON_IS_DUAL_FUNC(efx)		\
	(efx_nic_rev(efx) < EFX_REV_FALCON_B0)
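/* Pre-B0 parts are dual-function PCI devices; note the corresponding
 * nic_data->pci_dev2 handling in falcon_fatal_interrupt() below. */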
/**************************************************************************
 *
 * Falcon hardware access
 *
 **************************************************************************/

static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
					unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
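/* For example, if the DMA write of a 64-bit event has only partially
 * landed, at least one of the two dwords still reads as all-ones, so
 * the event is reported as absent until the whole write is visible. */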
/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */

static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
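/* DIV_ROUND_UP(HZ, 20) is HZ/20 jiffies, i.e. the 50 ms noted above
 * regardless of the kernel's HZ setting. */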
/**************************************************************************
 *
 * Falcon special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a Falcon special buffer
 *
 * This will define a buffer (previously allocated via
 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		falcon_write_buf_tbl(efx, &buf_desc, index);
	}
}
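/* Each buffer table entry maps one 4 KB page; FRF_AZ_BUF_ADR_FBUF takes
 * the page frame number, hence the dma_addr >> 12 above. */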
/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Falcon generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}
/**************************************************************************
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	falcon_notify_tx_desc(tx_queue);
}
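/* write_count and insert_count are free-running; masking with
 * EFX_TXQ_MASK (ring size - 1, a power of two - see the BUILD_BUG_ON in
 * falcon_probe_tx() below) converts them to ring indices. */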
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return falcon_alloc_special_buffer(efx, &tx_queue->txd,
					   EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}
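/* FRF_AZ_TX_DESCQ_SIZE above is the log2 of the ring size: __ffs() of a
 * power-of-two entry count yields its exponent. */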
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = falcon_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}
int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
					   EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}
static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Falcon event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			EFX_TXQ_MASK;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
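/* Note that a single completion event can retire many descriptors:
 * efx_xmit_done() is passed the last completed index, and
 * irq_mod_score above is bumped by the number of descriptors retired. */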
/* Detect errors included in the rx_evt_pkt_ok bit. */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed =
			likely(efx->rx_checksum_enabled) &&
			(rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
			 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		/* Ignored */
		handled = true;
	}

	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}

static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;

	do {
		p_event = falcon_event(channel, read_ptr);
		event = *p_event;

		if (!falcon_event_present(&event))
			/* End of events */
			break;

		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			falcon_handle_tx_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			channel->eventq_magic = EFX_QWORD_FIELD(
				event, FSF_AZ_DRV_GEN_EV_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			falcon_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

	} while (rx_packets < rx_quota);

	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
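/* Only RX events count towards rx_quota; TX, driver and global events
 * are processed for free, so the per-channel tasklet that calls this
 * bounds its work by received packets rather than raw events. */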
void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
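/* irq_moderation is expressed in timer ticks; in hold-off mode the value
 * programmed is irq_moderation - 1, while a moderation of 0 disables the
 * timer entirely via FFE_BB_TIMER_MODE_DIS. */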
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return falcon_alloc_special_buffer(efx, &channel->eventq,
					   EFX_EVQ_SIZE * sizeof(efx_qword_t));
}

void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	falcon_set_int_moderation(channel);
}

void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	falcon_free_special_buffer(channel->efx, &channel->eventq);
}

/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic;
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}
/**************************************************************************
 *
 **************************************************************************/

static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	falcon_prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_tx_queue(tx_queue, efx)
		falcon_flush_tx_queue(tx_queue);

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_queue->flushed == FLUSH_PENDING)
				++rx_pending;
		}
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_pending == FALCON_RX_FLUSH_COUNT)
				break;
			if (rx_queue->flushed == FLUSH_FAILED ||
			    rx_queue->flushed == FLUSH_NONE) {
				falcon_flush_rx_queue(rx_queue);
				++rx_pending;
			}
		}
		efx_for_each_tx_queue(tx_queue, efx) {
			if (tx_queue->flushed != FLUSH_DONE)
				++tx_pending;
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (tx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = FLUSH_DONE;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (rx_queue->flushed != FLUSH_DONE)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = FLUSH_DONE;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;

	return -ETIMEDOUT;
}
/**************************************************************************
 *
 * Falcon hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate Falcon interrupts */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}

/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	falcon_interrupts(efx, 1, 1);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU.  Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}
/* Handle a legacy interrupt from Falcon
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}

	return IRQ_HANDLED;
}

/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	int i = 0;
	unsigned long offset;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	for (offset = FR_BZ_RX_INDIRECTION_TBL;
	     offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
	     offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     i % efx->n_rx_queues);
		efx_writed(efx, &dword, offset);
		i++;
	}
}
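/* The indirection table is 0x800 bytes at 0x10-byte strides, i.e. 128
 * entries, filled round-robin over the RX queues. */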
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}

void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
/**************************************************************************
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)

static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}
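/* For devices whose address is one bit wider than addr_len bytes
 * (munge_address = 1), bit 8 of the address is folded into bit 3 of the
 * command byte, as used by AT25040-style small EEPROMs. */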
/* Wait up to 10 ms for buffered write completion */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
/**************************************************************************
 *
 **************************************************************************
 */

static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
			efx_writeo(efx, &reg, FR_AB_GM_CFG1);
			udelay(1000);
			return 0;
		} else {
			EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
			efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

			for (count = 0; count < 10000; count++) {
				efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
				if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
				    0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining.  Serialise
	 * the drain sequence with the statistics fetch */
	falcon_stop_nic_stats(efx);

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_state.up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	falcon_start_nic_stats(efx);

	return 0;
}
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	if (!efx->link_state.up)
		falcon_drain_tx_fifo(efx);
}
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1993 static void falcon_stats_request(struct efx_nic *efx)
1995	struct falcon_nic_data *nic_data = efx->nic_data;
1998	WARN_ON(nic_data->stats_pending);
1999	WARN_ON(nic_data->stats_disable_count);
2001	if (nic_data->stats_dma_done == NULL)
2002		return;	/* no mac selected */
2004	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
2005	nic_data->stats_pending = true;
2006	wmb(); /* ensure done flag is clear */
2008	/* Initiate DMA transfer of stats */
2009	EFX_POPULATE_OWORD_2(reg,
2010			     FRF_AB_MAC_STAT_DMA_CMD, 1,
2011			     FRF_AB_MAC_STAT_DMA_ADR,
2012			     efx->stats_buffer.dma_addr);
2013	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
2015	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
2018 static void falcon_stats_complete(struct efx_nic *efx)
2020	struct falcon_nic_data *nic_data = efx->nic_data;
2022	if (!nic_data->stats_pending)
2025	nic_data->stats_pending = 0;
2026	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
2027		rmb(); /* read the done flag before the stats */
2028		efx->mac_op->update_stats(efx);
2030		EFX_ERR(efx, "timed out waiting for statistics\n");
2034 static void falcon_stats_timer_func(unsigned long context)
2036	struct efx_nic *efx = (struct efx_nic *)context;
2037	struct falcon_nic_data *nic_data = efx->nic_data;
2039	spin_lock(&efx->stats_lock);
2041	falcon_stats_complete(efx);
2042	if (nic_data->stats_disable_count == 0)
2043		falcon_stats_request(efx);
2045	spin_unlock(&efx->stats_lock);
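/*
 * [Illustrative sketch, not part of the driver.] The statistics fetch is
 * a one-word DMA handshake: falcon_stats_request() clears the done word,
 * issues wmb() so the clear is visible before the DMA command is written,
 * and the NIC later overwrites the word with FALCON_STATS_DONE. Any
 * consumer must pair that producer write with rmb() before trusting the
 * DMA'd statistics, e.g.:
 */
static bool example_stats_ready(struct falcon_nic_data *nic_data)
{
	if (*nic_data->stats_dma_done != FALCON_STATS_DONE)
		return false;	/* DMA still in flight */
	rmb();			/* read the done flag before the stats */
	return true;		/* stats buffer contents now valid */
}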
2048 static bool falcon_loopback_link_poll(struct efx_nic *efx)
2050	struct efx_link_state old_state = efx->link_state;
2052	WARN_ON(!mutex_is_locked(&efx->mac_lock));
2053	WARN_ON(!LOOPBACK_INTERNAL(efx));
2055	efx->link_state.fd = true;
2056	efx->link_state.fc = efx->wanted_fc;
2057	efx->link_state.up = true;
2059	if (efx->loopback_mode == LOOPBACK_GMAC)
2060		efx->link_state.speed = 1000;
2062		efx->link_state.speed = 10000;
2064	return !efx_link_state_equal(&efx->link_state, &old_state);
2067 /**************************************************************************
2069  * PHY access via GMII
2071  **************************************************************************
2074 /* Wait for GMII access to complete */
2075 static int falcon_gmii_wait(struct efx_nic *efx)
2077	efx_oword_t md_stat;
2080	/* wait up to 50ms - taken max from datasheet */
2081	for (count = 0; count < 5000; count++) {
2082		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
2083		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2084			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2085			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2086				EFX_ERR(efx, "error from GMII access "
2088					EFX_OWORD_VAL(md_stat));
2095	EFX_ERR(efx, "timed out waiting for GMII\n");
2099 /* Write an MDIO register of a PHY connected to Falcon. */
2100 static int falcon_mdio_write(struct net_device *net_dev,
2101			     int prtad, int devad, u16 addr, u16 value)
2103	struct efx_nic *efx = netdev_priv(net_dev);
2107	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2108		    prtad, devad, addr, value);
2110	mutex_lock(&efx->mdio_lock);
2112	/* Check MDIO not currently being accessed */
2113	rc = falcon_gmii_wait(efx);
2117	/* Write the address/ID register */
2118	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2119	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2121	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2122			     FRF_AB_MD_DEV_ADR, devad);
2123	efx_writeo(efx, &reg, FR_AB_MD_ID);
2126	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2127	efx_writeo(efx, &reg, FR_AB_MD_TXD);
2129	EFX_POPULATE_OWORD_2(reg,
2132	efx_writeo(efx, &reg, FR_AB_MD_CS);
2134	/* Wait for data to be written */
2135	rc = falcon_gmii_wait(efx);
2137	/* Abort the write operation */
2138	EFX_POPULATE_OWORD_2(reg,
2141	efx_writeo(efx, &reg, FR_AB_MD_CS);
2146	mutex_unlock(&efx->mdio_lock);
2150 /* Read an MDIO register of a PHY connected to Falcon. */
2151 static int falcon_mdio_read(struct net_device *net_dev,
2152			    int prtad, int devad, u16 addr)
2154	struct efx_nic *efx = netdev_priv(net_dev);
2158	mutex_lock(&efx->mdio_lock);
2160	/* Check MDIO not currently being accessed */
2161	rc = falcon_gmii_wait(efx);
2165	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2166	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2168	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2169			     FRF_AB_MD_DEV_ADR, devad);
2170	efx_writeo(efx, &reg, FR_AB_MD_ID);
2172	/* Request data to be read */
2173	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2174	efx_writeo(efx, &reg, FR_AB_MD_CS);
2176	/* Wait for data to become available */
2177	rc = falcon_gmii_wait(efx);
2179	efx_reado(efx, &reg, FR_AB_MD_RXD);
2180	rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2181	EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2182		    prtad, devad, addr, rc);
2184	/* Abort the read operation */
2185	EFX_POPULATE_OWORD_2(reg,
2188	efx_writeo(efx, &reg, FR_AB_MD_CS);
2190	EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2191		prtad, devad, addr, rc);
2195	mutex_unlock(&efx->mdio_lock);
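/*
 * [Illustrative sketch, not part of the driver.] falcon_mdio_read() and
 * falcon_mdio_write() are hooked into efx->mdio (a struct mdio_if_info)
 * in falcon_probe_port() below, so clause-45 PHY registers can be reached
 * generically. A hypothetical caller reading the PHY XS device ID:
 */
static int example_read_phyxs_id(struct efx_nic *efx)
{
	/* MDIO_MMD_PHYXS and MDIO_DEVID1 come from <linux/mdio.h> */
	return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad,
				   MDIO_MMD_PHYXS, MDIO_DEVID1);
}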
2199 static void falcon_clock_mac(struct efx_nic *efx)
2202	efx_oword_t nic_stat;
2204	/* Configure the NIC generated MAC clock correctly */
2205	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2206	strap_val = EFX_IS10G(efx) ? 5 : 3;
2207	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2208		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2209		EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2210		efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2212		/* Falcon A1 does not support 1G/10G speed switching
2213		 * and must not be used with a PHY that does. */
2214		BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
2219 int falcon_switch_mac(struct efx_nic *efx)
2221	struct efx_mac_operations *old_mac_op = efx->mac_op;
2222	struct falcon_nic_data *nic_data = efx->nic_data;
2223	unsigned int stats_done_offset;
2226	/* Don't try to fetch MAC stats while we're switching MACs */
2227	falcon_stop_nic_stats(efx);
2229	WARN_ON(!mutex_is_locked(&efx->mac_lock));
2230	efx->mac_op = (EFX_IS10G(efx) ?
2231		       &falcon_xmac_operations : &falcon_gmac_operations);
2234		stats_done_offset = XgDmaDone_offset;
2236		stats_done_offset = GDmaDone_offset;
2237	nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
2239	if (old_mac_op == efx->mac_op)
2242	falcon_clock_mac(efx);
2244	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2245	/* Not all macs support a mac-level link state */
2246	efx->xmac_poll_required = false;
2248	rc = falcon_reset_macs(efx);
2250	falcon_start_nic_stats(efx);
2254 /* This call is responsible for hooking in the MAC and PHY operations */
2255 int falcon_probe_port(struct efx_nic *efx)
2259	switch (efx->phy_type) {
2260	case PHY_TYPE_SFX7101:
2261		efx->phy_op = &falcon_sfx7101_phy_ops;
2263	case PHY_TYPE_SFT9001A:
2264	case PHY_TYPE_SFT9001B:
2265		efx->phy_op = &falcon_sft9001_phy_ops;
2267	case PHY_TYPE_QT2022C2:
2268	case PHY_TYPE_QT2025C:
2269		efx->phy_op = &falcon_qt202x_phy_ops;
2272		EFX_ERR(efx, "Unknown PHY type %d\n",
2277	if (efx->phy_op->macs & EFX_XMAC)
2278		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2279					(1 << LOOPBACK_XGXS) |
2280					(1 << LOOPBACK_XAUI));
2281	if (efx->phy_op->macs & EFX_GMAC)
2282		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2283	efx->loopback_modes |= efx->phy_op->loopbacks;
2285	/* Set up MDIO structure for PHY */
2286	efx->mdio.mmds = efx->phy_op->mmds;
2287	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
2288	efx->mdio.mdio_read = falcon_mdio_read;
2289	efx->mdio.mdio_write = falcon_mdio_write;
2291	/* Initial assumption */
2292	efx->link_state.speed = 10000;
2293	efx->link_state.fd = true;
2295	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2296	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
2297		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2299		efx->wanted_fc = EFX_FC_RX;
2301	/* Allocate buffer for stats */
2302	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
2303				 FALCON_MAC_STATS_SIZE);
2306	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
2307		(u64)efx->stats_buffer.dma_addr,
2308		efx->stats_buffer.addr,
2309		(u64)virt_to_phys(efx->stats_buffer.addr));
2314 void falcon_remove_port(struct efx_nic *efx)
2316	falcon_free_buffer(efx, &efx->stats_buffer);
2319 /**************************************************************************
2321  * Multicast filtering
2323  **************************************************************************
2326 void falcon_push_multicast_hash(struct efx_nic *efx)
2328	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2330	WARN_ON(!mutex_is_locked(&efx->mac_lock));
2332	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2333	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
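/*
 * [Illustrative sketch, not part of the driver.] The multicast hash is a
 * 256-bit bitmap split across the two 128-bit registers written above; an
 * address selects a bucket via its Ethernet CRC. The core driver owns the
 * exact scheme; the idea is along these lines, assuming the union exposes
 * a byte[] view and using ether_crc() from <linux/crc32.h>:
 */
static void example_hash_mc_addr(union efx_multicast_hash *mc_hash,
				 const u8 *addr)
{
	unsigned int bit = ether_crc(ETH_ALEN, addr) & 0xff; /* 256 buckets */

	mc_hash->byte[bit / 8] |= 1 << (bit % 8);
}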
2337 /**************************************************************************
2341  **************************************************************************/
2343 int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2345	struct falcon_nvconfig *nvconfig;
2346	struct efx_spi_device *spi;
2348	int rc, magic_num, struct_ver;
2349	__le16 *word, *limit;
2352	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2356	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2359	nvconfig = region + FALCON_NVCONFIG_OFFSET;
2361	mutex_lock(&efx->spi_lock);
2362	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
2363	mutex_unlock(&efx->spi_lock);
2365		EFX_ERR(efx, "Failed to read %s\n",
2366			efx->spi_flash ? "flash" : "EEPROM");
2371	magic_num = le16_to_cpu(nvconfig->board_magic_num);
2372	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2375	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2376		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2379	if (struct_ver < 2) {
2380		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2382	} else if (struct_ver < 4) {
2383		word = &nvconfig->board_magic_num;
2384		limit = (__le16 *) (nvconfig + 1);
2387		limit = region + FALCON_NVCONFIG_END;
2389	for (csum = 0; word < limit; ++word)
2390		csum += le16_to_cpu(*word);
2392	if (~csum & 0xffff) {
2393		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2399	memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
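/*
 * [Illustrative note, not part of the driver.] The test "~csum & 0xffff"
 * above rejects the image unless the low 16 bits of the accumulated word
 * sum are all ones, i.e. the NVRAM is valid exactly when
 * (csum & 0xffff) == 0xffff; carries into bit 16 and above are ignored by
 * the mask. For example, an accumulated sum of 0x2ffff passes while
 * 0x2fffe fails.
 */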
2406 /* Registers tested in the falcon register test */
2410 } efx_test_registers[] = {
2412	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2414	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2416	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2417	{ FR_AZ_TX_RESERVED,
2418	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2420	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2421	{ FR_AZ_SRM_TX_DC_CFG,
2422	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2424	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2425	{ FR_AZ_RX_DC_PF_WM,
2426	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2428	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2430	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2432	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2434	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2436	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2438	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2439	{ FR_AB_XM_RX_PARAM,
2440	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2442	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2444	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2446	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2449 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2450				     const efx_oword_t *mask)
2452	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2453		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
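/*
 * [Illustrative note, not part of the driver.] The XOR-and-mask idiom
 * above reports any bit that differs between the written and read-back
 * values while ignoring bits outside the testable mask. For one 64-bit
 * lane: with a = 0xff00, b = 0xfe01 and mask = 0x00ff, the difference
 * (a ^ b) = 0x0101 masked with 0x00ff leaves 0x0001, so only the testable
 * differing bit causes a mismatch.
 */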
2456 int falcon_test_registers(struct efx_nic *efx)
2458	unsigned address = 0, i, j;
2459	efx_oword_t mask, imask, original, reg, buf;
2461	/* Falcon should be in loopback to isolate the XMAC from the PHY */
2462	WARN_ON(!LOOPBACK_INTERNAL(efx));
2464	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2465		address = efx_test_registers[i].address;
2466		mask = imask = efx_test_registers[i].mask;
2467		EFX_INVERT_OWORD(imask);
2469		efx_reado(efx, &original, address);
2471		/* bit sweep on and off */
2472		for (j = 0; j < 128; j++) {
2473			if (!EFX_EXTRACT_OWORD32(mask, j, j))
2476			/* Test this testable bit can be set in isolation */
2477			EFX_AND_OWORD(reg, original, mask);
2478			EFX_SET_OWORD32(reg, j, j, 1);
2480			efx_writeo(efx, &reg, address);
2481			efx_reado(efx, &buf, address);
2483			if (efx_masked_compare_oword(&reg, &buf, &mask))
2486			/* Test this testable bit can be cleared in isolation */
2487			EFX_OR_OWORD(reg, original, mask);
2488			EFX_SET_OWORD32(reg, j, j, 0);
2490			efx_writeo(efx, &reg, address);
2491			efx_reado(efx, &buf, address);
2493			if (efx_masked_compare_oword(&reg, &buf, &mask))
2497		efx_writeo(efx, &original, address);
2503	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2504		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2505		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2509 /**************************************************************************
2513  **************************************************************************
2516 /* Resets NIC to known state.  This routine must be called in process
2517  * context and is allowed to sleep. */
2518 int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2520	struct falcon_nic_data *nic_data = efx->nic_data;
2521	efx_oword_t glb_ctl_reg_ker;
2524	EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
2526	/* Initiate device reset */
2527	if (method == RESET_TYPE_WORLD) {
2528		rc = pci_save_state(efx->pci_dev);
2530			EFX_ERR(efx, "failed to backup PCI state of primary "
2531				"function prior to hardware reset\n");
2534		if (FALCON_IS_DUAL_FUNC(efx)) {
2535			rc = pci_save_state(nic_data->pci_dev2);
2537				EFX_ERR(efx, "failed to backup PCI state of "
2538					"secondary function prior to "
2539					"hardware reset\n");
2544		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2545				     FRF_AB_EXT_PHY_RST_DUR,
2546				     FFE_AB_EXT_PHY_RST_DUR_10240US,
2549		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2550				     /* exclude PHY from "invisible" reset */
2551				     FRF_AB_EXT_PHY_RST_CTL,
2552				     method == RESET_TYPE_INVISIBLE,
2553				     /* exclude EEPROM/flash and PCIe */
2554				     FRF_AB_PCIE_CORE_RST_CTL, 1,
2555				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2556				     FRF_AB_PCIE_SD_RST_CTL, 1,
2557				     FRF_AB_EE_RST_CTL, 1,
2558				     FRF_AB_EXT_PHY_RST_DUR,
2559				     FFE_AB_EXT_PHY_RST_DUR_10240US,
2562	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2564	EFX_LOG(efx, "waiting for hardware reset\n");
2565	schedule_timeout_uninterruptible(HZ / 20);
2567	/* Restore PCI configuration if needed */
2568	if (method == RESET_TYPE_WORLD) {
2569		if (FALCON_IS_DUAL_FUNC(efx)) {
2570			rc = pci_restore_state(nic_data->pci_dev2);
2572				EFX_ERR(efx, "failed to restore PCI config for "
2573					"the secondary function\n");
2577		rc = pci_restore_state(efx->pci_dev);
2579			EFX_ERR(efx, "failed to restore PCI config for the "
2580				"primary function\n");
2583		EFX_LOG(efx, "successfully restored PCI config\n");
2586	/* Assert that reset complete */
2587	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2588	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2590		EFX_ERR(efx, "timed out waiting for hardware reset\n");
2593	EFX_LOG(efx, "hardware reset complete\n");
2597	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
2600	pci_restore_state(efx->pci_dev);
2607 void falcon_monitor(struct efx_nic *efx)
2612	BUG_ON(!mutex_is_locked(&efx->mac_lock));
2614	rc = falcon_board(efx)->type->monitor(efx);
2616		EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
2617			(rc == -ERANGE) ? "reported fault" : "failed");
2618		efx->phy_mode |= PHY_MODE_LOW_POWER;
2619		__efx_reconfigure_port(efx);
2622	if (LOOPBACK_INTERNAL(efx))
2623		link_changed = falcon_loopback_link_poll(efx);
2625		link_changed = efx->phy_op->poll(efx);
2628		falcon_stop_nic_stats(efx);
2629		falcon_deconfigure_mac_wrapper(efx);
2631		falcon_switch_mac(efx);
2632		efx->mac_op->reconfigure(efx);
2634		falcon_start_nic_stats(efx);
2636		efx_link_status_changed(efx);
2640		falcon_poll_xmac(efx);
2643 /* Zeroes out the SRAM contents.  This routine must be called in
2644  * process context and is allowed to sleep.
2646 static int falcon_reset_sram(struct efx_nic *efx)
2648	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2651	/* Set the SRAM wake/sleep GPIO appropriately. */
2652	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2653	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2654	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2655	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2657	/* Initiate SRAM reset */
2658	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2659			     FRF_AZ_SRM_INIT_EN, 1,
2660			     FRF_AZ_SRM_NB_SZ, 0);
2661	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2663	/* Wait for SRAM reset to complete */
2666		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
2668		/* SRAM reset is slow; expect around 16ms */
2669		schedule_timeout_uninterruptible(HZ / 50);
2671		/* Check for reset complete */
2672		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2673		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2674			EFX_LOG(efx, "SRAM reset complete\n");
2678	} while (++count < 20);	/* wait up to 0.4 sec */
2680	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
2684 static int falcon_spi_device_init(struct efx_nic *efx,
2685				  struct efx_spi_device **spi_device_ret,
2686				  unsigned int device_id, u32 device_type)
2688	struct efx_spi_device *spi_device;
2690	if (device_type != 0) {
2691		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
2694		spi_device->device_id = device_id;
2696			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2697		spi_device->addr_len =
2698			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2699		spi_device->munge_address = (spi_device->size == 1 << 9 &&
2700					     spi_device->addr_len == 1);
2701		spi_device->erase_command =
2702			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2703		spi_device->erase_size =
2704			1 << SPI_DEV_TYPE_FIELD(device_type,
2705						SPI_DEV_TYPE_ERASE_SIZE);
2706		spi_device->block_size =
2707			1 << SPI_DEV_TYPE_FIELD(device_type,
2708						SPI_DEV_TYPE_BLOCK_SIZE);
2710		spi_device->efx = efx;
2715	kfree(*spi_device_ret);
2716	*spi_device_ret = spi_device;
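/*
 * [Illustrative sketch, not part of the driver.] The u32 device_type word
 * packs log2 sizes and command bytes into bit fields, one per
 * SPI_DEV_TYPE_* definition, and SPI_DEV_TYPE_FIELD() extracts a single
 * field. Assuming it is the usual shift-and-mask over the *_LBN/*_WIDTH
 * pairs, an open-coded equivalent would look like:
 */
static unsigned int example_spi_type_field(u32 device_type,
					   unsigned int lbn,
					   unsigned int width)
{
	return (device_type >> lbn) & ((1U << width) - 1);
}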
2721 static void falcon_remove_spi_devices(struct efx_nic *efx)
2723	kfree(efx->spi_eeprom);
2724	efx->spi_eeprom = NULL;
2725	kfree(efx->spi_flash);
2726	efx->spi_flash = NULL;
2729 /* Extract non-volatile configuration */
2730 static int falcon_probe_nvconfig(struct efx_nic *efx)
2732	struct falcon_nvconfig *nvconfig;
2736	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2740	rc = falcon_read_nvram(efx, nvconfig);
2741	if (rc == -EINVAL) {
2742		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
2743		efx->phy_type = PHY_TYPE_NONE;
2744		efx->mdio.prtad = MDIO_PRTAD_NONE;
2750		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2751		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2753		efx->phy_type = v2->port0_phy_type;
2754		efx->mdio.prtad = v2->port0_phy_addr;
2755		board_rev = le16_to_cpu(v2->board_revision);
2757		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2758			rc = falcon_spi_device_init(
2759				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2760				le32_to_cpu(v3->spi_device_type
2761					    [FFE_AB_SPI_DEVICE_FLASH]));
2764			rc = falcon_spi_device_init(
2765				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2766				le32_to_cpu(v3->spi_device_type
2767					    [FFE_AB_SPI_DEVICE_EEPROM]));
2773	/* Read the MAC addresses */
2774	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2776	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2778	falcon_probe_board(efx, board_rev);
2784	falcon_remove_spi_devices(efx);
2790 /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
2791  * count, port speed).  Set workaround and feature flags accordingly.
2793 static int falcon_probe_nic_variant(struct efx_nic *efx)
2795	efx_oword_t altera_build;
2796	efx_oword_t nic_stat;
2798	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2799	if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2800		EFX_ERR(efx, "Falcon FPGA not supported\n");
2804	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2806	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2807		u8 pci_rev = efx->pci_dev->revision;
2809		if ((pci_rev == 0xff) || (pci_rev == 0)) {
2810			EFX_ERR(efx, "Falcon rev A0 not supported\n");
2813		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
2814			EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
2817		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2818			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2826 /* Probe all SPI devices on the NIC */
2827 static void falcon_probe_spi_devices(struct efx_nic *efx)
2829	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2832	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2833	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2834	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2836	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2837		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2838			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2839		EFX_LOG(efx, "Booted from %s\n",
2840			boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2842		/* Disable VPD and set clock dividers to safe
2843		 * values for initial programming. */
2845		EFX_LOG(efx, "Booted from internal ASIC settings;"
2846			" setting SPI config\n");
2847		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2848				     /* 125 MHz / 7 ~= 20 MHz */
2849				     FRF_AB_EE_SF_CLOCK_DIV, 7,
2850				     /* 125 MHz / 63 ~= 2 MHz */
2851				     FRF_AB_EE_EE_CLOCK_DIV, 63);
2852		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2855	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2856		falcon_spi_device_init(efx, &efx->spi_flash,
2857				       FFE_AB_SPI_DEVICE_FLASH,
2858				       default_flash_type);
2859	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2860		falcon_spi_device_init(efx, &efx->spi_eeprom,
2861				       FFE_AB_SPI_DEVICE_EEPROM,
2865 int falcon_probe_nic(struct efx_nic *efx)
2867	struct falcon_nic_data *nic_data;
2868	struct falcon_board *board;
2871	/* Allocate storage for hardware specific data */
2872	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2875	efx->nic_data = nic_data;
2877	/* Determine number of ports etc. */
2878	rc = falcon_probe_nic_variant(efx);
2882	/* Probe secondary function if expected */
2883	if (FALCON_IS_DUAL_FUNC(efx)) {
2884		struct pci_dev *dev = pci_dev_get(efx->pci_dev);
2886		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
2888			if (dev->bus == efx->pci_dev->bus &&
2889			    dev->devfn == efx->pci_dev->devfn + 1) {
2890				nic_data->pci_dev2 = dev;
2894		if (!nic_data->pci_dev2) {
2895			EFX_ERR(efx, "failed to find secondary function\n");
2901	/* Now we can reset the NIC */
2902	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
2904		EFX_ERR(efx, "failed to reset NIC\n");
2908	/* Allocate memory for INT_KER */
2909	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
2912	BUG_ON(efx->irq_status.dma_addr & 0x0f);
2914	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
2915		(u64)efx->irq_status.dma_addr,
2916		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
2918	falcon_probe_spi_devices(efx);
2920	/* Read in the non-volatile configuration */
2921	rc = falcon_probe_nvconfig(efx);
2925	/* Initialise I2C adapter */
2926	board = falcon_board(efx);
2927	board->i2c_adap.owner = THIS_MODULE;
2928	board->i2c_data = falcon_i2c_bit_operations;
2929	board->i2c_data.data = efx;
2930	board->i2c_adap.algo_data = &board->i2c_data;
2931	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2932	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2933		sizeof(board->i2c_adap.name));
2934	rc = i2c_bit_add_bus(&board->i2c_adap);
2938	rc = falcon_board(efx)->type->init(efx);
2940		EFX_ERR(efx, "failed to initialise board\n");
2944	nic_data->stats_disable_count = 1;
2945	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
2946		    (unsigned long)efx);
2951	BUG_ON(i2c_del_adapter(&board->i2c_adap));
2952	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2954	falcon_remove_spi_devices(efx);
2955	falcon_free_buffer(efx, &efx->irq_status);
2958	if (nic_data->pci_dev2) {
2959		pci_dev_put(nic_data->pci_dev2);
2960		nic_data->pci_dev2 = NULL;
2964	kfree(efx->nic_data);
2968 static void falcon_init_rx_cfg(struct efx_nic *efx)
2970	/* Prior to Siena the RX DMA engine will split each frame at
2971	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2972	 * be so large that that never happens. */
2973	const unsigned huge_buf_size = (3 * 4096) >> 5;
2974	/* RX control FIFO thresholds (32 entries) */
2975	const unsigned ctrl_xon_thr = 20;
2976	const unsigned ctrl_xoff_thr = 25;
2977	/* RX data FIFO thresholds (256-byte units; size varies) */
2978	int data_xon_thr = rx_xon_thresh_bytes >> 8;
2979	int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
2982	efx_reado(efx, &reg, FR_AZ_RX_CFG);
2983	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2984		/* Data FIFO size is 5.5K */
2985		if (data_xon_thr < 0)
2986			data_xon_thr = 512 >> 8;
2987		if (data_xoff_thr < 0)
2988			data_xoff_thr = 2048 >> 8;
2989		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2990		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2992		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2993		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2994		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2995		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2997		/* Data FIFO size is 80K; register fields moved */
2998		if (data_xon_thr < 0)
2999			data_xon_thr = 27648 >> 8; /* ~3*max MTU */
3000		if (data_xoff_thr < 0)
3001			data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
3002		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
3003		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
3005		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
3006		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
3007		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
3008		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
3009		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
3011	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
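/*
 * [Illustrative note, not part of the driver.] The data FIFO thresholds
 * are programmed in 256-byte units, hence the ">> 8" conversions above.
 * The B0 defaults work out as 27648 >> 8 = 108 units for XON and
 * 54272 >> 8 = 212 units for XOFF, sized against the 80K data FIFO; a
 * user-supplied rx_xoff_thresh_bytes module parameter is scaled the same
 * way.
 */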
3014 /* This call performs hardware-specific global initialisation, such as
3015  * defining the descriptor cache sizes and number of RSS channels.
3016  * It does not set up any buffers, descriptor rings or event queues.
3018 int falcon_init_nic(struct efx_nic *efx)
3023	/* Use on-chip SRAM */
3024	efx_reado(efx, &temp, FR_AB_NIC_STAT);
3025	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
3026	efx_writeo(efx, &temp, FR_AB_NIC_STAT);
3028	/* Set the source of the GMAC clock */
3029	if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
3030		efx_reado(efx, &temp, FR_AB_GPIO_CTL);
3031		EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
3032		efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
3035	/* Select the correct MAC */
3036	falcon_clock_mac(efx);
3038	rc = falcon_reset_sram(efx);
3042	/* Set positions of descriptor caches in SRAM. */
3043	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
3044			     efx->type->tx_dc_base / 8);
3045	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
3046	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
3047			     efx->type->rx_dc_base / 8);
3048	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
3050	/* Set TX descriptor cache size. */
3051	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
3052	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
3053	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
3055	/* Set RX descriptor cache size.  Set low watermark to size-8, as
3056	 * this allows most efficient prefetching.
3058	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
3059	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
3060	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
3061	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
3062	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
3064	/* Clear the parity enables on the TX data fifos as
3065	 * they produce false parity errors because of timing issues
3067	if (EFX_WORKAROUND_5129(efx)) {
3068		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
3069		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
3070		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
3073	/* Enable all the genuinely fatal interrupts.  (They are still
3074	 * masked by the overall interrupt mask, controlled by
3075	 * falcon_interrupts()).
3077	 * Note: All other fatal interrupts are enabled
3079	EFX_POPULATE_OWORD_3(temp,
3080			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
3081			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
3082			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
3083	EFX_INVERT_OWORD(temp);
3084	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
3086	if (EFX_WORKAROUND_7244(efx)) {
3087		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
3088		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
3089		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
3090		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
3091		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
3092		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
3095	falcon_setup_rss_indir_table(efx);
3097	/* XXX This is documented only for Falcon A0/A1 */
3098	/* Setup RX.  Wait for descriptor is broken and must
3099	 * be disabled.  RXDP recovery shouldn't be needed, but is.
3101	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3102	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3103	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3104	if (EFX_WORKAROUND_5583(efx))
3105		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3106	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3108	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3109	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3111	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3112	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3113	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3114	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3115	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3116	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3117	/* Enable SW_EV to inherit in char driver - assume harmless here */
3118	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3119	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
3120	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3121	/* Squash TX of packets of 16 bytes or less */
3122	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
3123		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3124	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3126	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3127	 * descriptors (which is bad).
3129	efx_reado(efx, &temp, FR_AZ_TX_CFG);
3130	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3131	efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3133	falcon_init_rx_cfg(efx);
3135	/* Set destination of both TX and RX Flush events */
3136	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
3137		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3138		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3144 void falcon_remove_nic(struct efx_nic *efx)
3146	struct falcon_nic_data *nic_data = efx->nic_data;
3147	struct falcon_board *board = falcon_board(efx);
3150	board->type->fini(efx);
3152	/* Remove I2C adapter and clear it in preparation for a retry */
3153	rc = i2c_del_adapter(&board->i2c_adap);
3155	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
3157	falcon_remove_spi_devices(efx);
3158	falcon_free_buffer(efx, &efx->irq_status);
3160	falcon_reset_hw(efx, RESET_TYPE_ALL);
3162	/* Release the second function after the reset */
3163	if (nic_data->pci_dev2) {
3164		pci_dev_put(nic_data->pci_dev2);
3165		nic_data->pci_dev2 = NULL;
3168	/* Tear down the private nic state */
3169	kfree(efx->nic_data);
3170	efx->nic_data = NULL;
3173 void falcon_update_nic_stats(struct efx_nic *efx)
3175	struct falcon_nic_data *nic_data = efx->nic_data;
3178	if (nic_data->stats_disable_count)
3181	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3182	efx->n_rx_nodesc_drop_cnt +=
3183		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
3185	if (nic_data->stats_pending &&
3186	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
3187		nic_data->stats_pending = false;
3188		rmb(); /* read the done flag before the stats */
3189		efx->mac_op->update_stats(efx);
3193 void falcon_start_nic_stats(struct efx_nic *efx)
3195	struct falcon_nic_data *nic_data = efx->nic_data;
3197	spin_lock_bh(&efx->stats_lock);
3198	if (--nic_data->stats_disable_count == 0)
3199		falcon_stats_request(efx);
3200	spin_unlock_bh(&efx->stats_lock);
3203 void falcon_stop_nic_stats(struct efx_nic *efx)
3205	struct falcon_nic_data *nic_data = efx->nic_data;
3210	spin_lock_bh(&efx->stats_lock);
3211	++nic_data->stats_disable_count;
3212	spin_unlock_bh(&efx->stats_lock);
3214	del_timer_sync(&nic_data->stats_timer);
3216	/* Wait enough time for the most recent transfer to
3218	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
3219		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
3224	spin_lock_bh(&efx->stats_lock);
3225	falcon_stats_complete(efx);
3226	spin_unlock_bh(&efx->stats_lock);
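/*
 * [Illustrative note, not part of the driver.] stats_disable_count is a
 * plain refcount, so stop/start calls must be strictly paired, as in
 * falcon_reset_macs() and falcon_switch_mac() above:
 *
 *	falcon_stop_nic_stats(efx);	(count 0 -> 1, timer stopped)
 *	... reconfigure the MAC ...
 *	falcon_start_nic_stats(efx);	(count 1 -> 0, stats DMA re-armed)
 *
 * falcon_probe_nic() leaves the count at 1, so statistics stay off until
 * the first falcon_start_nic_stats() call.
 */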
3229 /**************************************************************************
3231  * Revision-dependent attributes used by efx.c
3233  **************************************************************************
3236 struct efx_nic_type falcon_a1_nic_type = {
3237	.default_mac_ops = &falcon_xmac_operations,
3239	.revision = EFX_REV_FALCON_A1,
3240	.mem_map_size = 0x20000,
3241	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3242	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3243	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3244	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3245	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3246	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3247	.rx_buffer_padding = 0x24,
3248	.max_interrupt_mode = EFX_INT_MODE_MSI,
3249	.phys_addr_channels = 4,
3250	.tx_dc_base = 0x130000,
3251	.rx_dc_base = 0x100000,
3254 struct efx_nic_type falcon_b0_nic_type = {
3255	.default_mac_ops = &falcon_xmac_operations,
3257	.revision = EFX_REV_FALCON_B0,
3258	/* Map everything up to and including the RSS indirection
3259	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
3260	 * requires that they not be mapped.  */
3261	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3262			 FR_BZ_RX_INDIRECTION_TBL_STEP *
3263			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3264	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3265	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3266	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3267	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3268	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3269	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3270	.rx_buffer_padding = 0,
3271	.max_interrupt_mode = EFX_INT_MODE_MSIX,
3272	.phys_addr_channels = 32,	/* Hardware limit is 64, but the legacy
3273					 * interrupt handler only supports 32
3275	.tx_dc_base = 0x130000,
3276	.rx_dc_base = 0x100000,