/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
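
/* Illustrative note (derived from the values above): the *_ORDER values
 * encode the cache size as entries = 8 << order, so TX_DC_ENTRIES_ORDER
 * of 1 gives 16 entries and RX_DC_ENTRIES_ORDER of 3 gives 64.
 * efx_nic_init_common() below asserts exactly this relationship with
 * BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)).
 */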

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT
 * times before giving up on a queue flush.
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100
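
/* Illustrative note: with the values above, efx_nic_flush_queues() below
 * polls up to 100 times at 10 ms intervals, i.e. it waits roughly one
 * second for all flush-done events before declaring the flush timed out.
 */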

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
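
/* Illustrative example: for channel 3 the generated magic codes are
 * 0x00010103 (test) and 0x00010203 (fill); efx_handle_generated_event()
 * below compares the received FSF_AZ_DRV_GEN_EV_MAGIC field against these
 * per-channel values to tell the two apart.
 */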

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) + index;
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
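
/* Illustrative sketch of the convention described above: the queue is
 * initialised with memset(eventq.addr, 0xff, len) and each consumed event
 * is overwritten with all ones via EFX_SET_QWORD(), so efx_event_present()
 * returns false exactly for "empty" slots even if the NIC's two DMA dword
 * writes are observed out of order.
 */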

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
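
/* Illustrative example: a buffer with index 10, entries 3 and a 4KB-aligned
 * dma_addr programs buffer-table entries 10-12; entry i carries the page
 * address (dma_addr + i * 4096) >> 12 in FRF_AZ_BUF_ADR_FBUF.
 */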

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
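
/* Illustrative example: a 1024-entry descriptor ring of 8-byte qwords
 * (8192 bytes) is already a multiple of EFX_BUF_SIZE and yields
 * entries = 2; a 10000-byte request would be rounded up by ALIGN() to
 * 12288 bytes, i.e. three 4KB buffer-table entries.
 */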

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
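
/* Illustrative note on the check above: empty_read_count holds the
 * read_count observed when the queue last went empty, with
 * EFX_EMPTY_COUNT_VALID or-ed in so that zero means "not valid".
 * Masking that flag off and XOR-ing with write_count yields 0 exactly
 * when nothing has been written since the queue emptied, i.e. when a
 * single-descriptor push is safe.
 */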

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
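
/* Illustrative note: FRF_AZ_TX_DESCQ_SIZE takes the log2 encoding produced
 * by __ffs() on the power-of-two ring size, so a 512-entry TX ring is
 * programmed as 9. The RX and event queue init routines below use the
 * same encoding.
 */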

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
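
/* Illustrative note: the BUILD_BUG_ON above is what lets the event qword
 * be copied straight into the low 64 bits of the DRV_EV register - the
 * data field must start at bit 0 and be exactly 64 bits wide, with the
 * target queue ID then set via FRF_AZ_DRV_EV_QID.
 */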

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
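
/* Illustrative example of the modular arithmetic above: with ptr_mask 511,
 * expected 510 and a received index of 2, dropped = (2 - 510) & 511 = 4,
 * correctly counting the four events lost across the ring wrap.
 */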

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		++channel->magic_count;
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & channel->eventq_mask;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & channel->eventq_mask;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	 * date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours? If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size. Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;

	/* Enable all the genuinely fatal interrupts. (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries. Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
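
/* Illustrative example of the length calculation above: a single register
 * contributes sizeof(efx_oword_t) = 16 bytes, while a table with step 8
 * and 1024 rows (e.g. the truncated buffer-table dump above) contributes
 * 1024 * min(8, 16) = 8192 bytes.
 */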

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			}
			buf += size;
		}
	}
}