2 * Core code for QEMU igb emulation
5 * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
7 * Copyright (c) 2020-2023 Red Hat, Inc.
8 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
9 * Developed by Daynix Computing LTD (http://www.daynix.com)
12 * Akihiko Odaki <akihiko.odaki@daynix.com>
13 * Gal Hammmer <gal.hammer@sap.com>
14 * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
15 * Dmitry Fleytman <dmitry@daynix.com>
16 * Leonid Bloch <leonid@daynix.com>
17 * Yan Vugenfirer <yan@daynix.com>
19 * Based on work done by:
20 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
21 * Copyright (c) 2008 Qumranet
22 * Based on work done by:
23 * Copyright (c) 2007 Dan Aloni
24 * Copyright (c) 2004 Antony T Curtis
26 * This library is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU Lesser General Public
28 * License as published by the Free Software Foundation; either
29 * version 2.1 of the License, or (at your option) any later version.
31 * This library is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
34 * Lesser General Public License for more details.
36 * You should have received a copy of the GNU Lesser General Public
37 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
40 #include "qemu/osdep.h"
44 #include "hw/net/mii.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "sysemu/runstate.h"
49 #include "net_tx_pkt.h"
50 #include "net_rx_pkt.h"
52 #include "igb_common.h"
53 #include "e1000x_common.h"
58 #define E1000E_MAX_TX_FRAGS (64)
60 union e1000_rx_desc_union
{
61 struct e1000_rx_desc legacy
;
62 union e1000_adv_rx_desc adv
;
65 typedef struct IGBTxPktVmdqCallbackContext
{
68 } IGBTxPktVmdqCallbackContext
;
70 typedef struct L2Header
{
71 struct eth_header eth
;
72 struct vlan_header vlan
[2];
76 uint8_t message_id_transport_specific
;
78 uint16_t message_length
;
79 uint8_t subdomain_number
;
84 uint8_t source_communication_technology
;
85 uint32_t source_uuid_lo
;
86 uint16_t source_uuid_hi
;
87 uint16_t source_port_id
;
90 uint8_t log_message_period
;
94 igb_receive_internal(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
,
95 bool has_vnet
, bool *external_tx
);
97 static void igb_raise_interrupts(IGBCore
*core
, size_t index
, uint32_t causes
);
98 static void igb_reset(IGBCore
*core
, bool sw
);
101 igb_raise_legacy_irq(IGBCore
*core
)
103 trace_e1000e_irq_legacy_notify(true);
104 e1000x_inc_reg_if_not_full(core
->mac
, IAC
);
105 pci_set_irq(core
->owner
, 1);
109 igb_lower_legacy_irq(IGBCore
*core
)
111 trace_e1000e_irq_legacy_notify(false);
112 pci_set_irq(core
->owner
, 0);
115 static void igb_msix_notify(IGBCore
*core
, unsigned int cause
)
117 PCIDevice
*dev
= core
->owner
;
119 uint32_t effective_eiac
;
122 vfn
= 8 - (cause
+ 2) / IGBVF_MSIX_VEC_NUM
;
123 if (vfn
< pcie_sriov_num_vfs(core
->owner
)) {
124 dev
= pcie_sriov_get_vf_at_index(core
->owner
, vfn
);
126 vector
= (cause
+ 2) % IGBVF_MSIX_VEC_NUM
;
127 } else if (cause
>= IGB_MSIX_VEC_NUM
) {
128 qemu_log_mask(LOG_GUEST_ERROR
,
129 "igb: Tried to use vector unavailable for PF");
135 msix_notify(dev
, vector
);
137 trace_e1000e_irq_icr_clear_eiac(core
->mac
[EICR
], core
->mac
[EIAC
]);
138 effective_eiac
= core
->mac
[EIAC
] & BIT(cause
);
139 core
->mac
[EICR
] &= ~effective_eiac
;
143 igb_intrmgr_rearm_timer(IGBIntrDelayTimer
*timer
)
145 int64_t delay_ns
= (int64_t) timer
->core
->mac
[timer
->delay_reg
] *
146 timer
->delay_resolution_ns
;
148 trace_e1000e_irq_rearm_timer(timer
->delay_reg
<< 2, delay_ns
);
150 timer_mod(timer
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + delay_ns
);
152 timer
->running
= true;
156 igb_intmgr_timer_resume(IGBIntrDelayTimer
*timer
)
158 if (timer
->running
) {
159 igb_intrmgr_rearm_timer(timer
);
164 igb_intrmgr_on_msix_throttling_timer(void *opaque
)
166 IGBIntrDelayTimer
*timer
= opaque
;
167 int idx
= timer
- &timer
->core
->eitr
[0];
169 timer
->running
= false;
171 trace_e1000e_irq_msix_notify_postponed_vec(idx
);
172 igb_msix_notify(timer
->core
, idx
);
176 igb_intrmgr_initialize_all_timers(IGBCore
*core
, bool create
)
180 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
181 core
->eitr
[i
].core
= core
;
182 core
->eitr
[i
].delay_reg
= EITR0
+ i
;
183 core
->eitr
[i
].delay_resolution_ns
= E1000_INTR_DELAY_NS_RES
;
190 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
191 core
->eitr
[i
].timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
192 igb_intrmgr_on_msix_throttling_timer
,
198 igb_intrmgr_resume(IGBCore
*core
)
202 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
203 igb_intmgr_timer_resume(&core
->eitr
[i
]);
208 igb_intrmgr_reset(IGBCore
*core
)
212 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
213 if (core
->eitr
[i
].running
) {
214 timer_del(core
->eitr
[i
].timer
);
215 igb_intrmgr_on_msix_throttling_timer(&core
->eitr
[i
]);
221 igb_intrmgr_pci_unint(IGBCore
*core
)
225 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
226 timer_free(core
->eitr
[i
].timer
);
231 igb_intrmgr_pci_realize(IGBCore
*core
)
233 igb_intrmgr_initialize_all_timers(core
, true);
237 igb_rx_csum_enabled(IGBCore
*core
)
239 return (core
->mac
[RXCSUM
] & E1000_RXCSUM_PCSD
) ? false : true;
243 igb_rx_use_legacy_descriptor(IGBCore
*core
)
246 * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
252 typedef struct E1000ERingInfo
{
262 igb_rx_queue_desctyp_get(IGBCore
*core
, const E1000ERingInfo
*r
)
264 return core
->mac
[E1000_SRRCTL(r
->idx
) >> 2] & E1000_SRRCTL_DESCTYPE_MASK
;
268 igb_rx_use_ps_descriptor(IGBCore
*core
, const E1000ERingInfo
*r
)
270 uint32_t desctyp
= igb_rx_queue_desctyp_get(core
, r
);
271 return desctyp
== E1000_SRRCTL_DESCTYPE_HDR_SPLIT
||
272 desctyp
== E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS
;
276 igb_rss_enabled(IGBCore
*core
)
278 return (core
->mac
[MRQC
] & 3) == E1000_MRQC_ENABLE_RSS_MQ
&&
279 !igb_rx_csum_enabled(core
) &&
280 !igb_rx_use_legacy_descriptor(core
);
283 typedef struct E1000E_RSSInfo_st
{
291 igb_rss_get_hash_type(IGBCore
*core
, struct NetRxPkt
*pkt
)
294 EthL4HdrProto l4hdr_proto
;
296 assert(igb_rss_enabled(core
));
298 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
301 trace_e1000e_rx_rss_ip4(l4hdr_proto
, core
->mac
[MRQC
],
302 E1000_MRQC_EN_TCPIPV4(core
->mac
[MRQC
]),
303 E1000_MRQC_EN_IPV4(core
->mac
[MRQC
]));
305 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&&
306 E1000_MRQC_EN_TCPIPV4(core
->mac
[MRQC
])) {
307 return E1000_MRQ_RSS_TYPE_IPV4TCP
;
310 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
&&
311 (core
->mac
[MRQC
] & E1000_MRQC_RSS_FIELD_IPV4_UDP
)) {
312 return E1000_MRQ_RSS_TYPE_IPV4UDP
;
315 if (E1000_MRQC_EN_IPV4(core
->mac
[MRQC
])) {
316 return E1000_MRQ_RSS_TYPE_IPV4
;
319 eth_ip6_hdr_info
*ip6info
= net_rx_pkt_get_ip6_info(pkt
);
321 bool ex_dis
= core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_EX_DIS
;
322 bool new_ex_dis
= core
->mac
[RFCTL
] & E1000_RFCTL_NEW_IPV6_EXT_DIS
;
325 * Following two traces must not be combined because resulting
326 * event will have 11 arguments totally and some trace backends
327 * (at least "ust") have limitation of maximum 10 arguments per
328 * event. Events with more arguments fail to compile for
329 * backends like these.
331 trace_e1000e_rx_rss_ip6_rfctl(core
->mac
[RFCTL
]);
332 trace_e1000e_rx_rss_ip6(ex_dis
, new_ex_dis
, l4hdr_proto
,
333 ip6info
->has_ext_hdrs
,
334 ip6info
->rss_ex_dst_valid
,
335 ip6info
->rss_ex_src_valid
,
337 E1000_MRQC_EN_TCPIPV6EX(core
->mac
[MRQC
]),
338 E1000_MRQC_EN_IPV6EX(core
->mac
[MRQC
]),
339 E1000_MRQC_EN_IPV6(core
->mac
[MRQC
]));
341 if ((!ex_dis
|| !ip6info
->has_ext_hdrs
) &&
342 (!new_ex_dis
|| !(ip6info
->rss_ex_dst_valid
||
343 ip6info
->rss_ex_src_valid
))) {
345 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&&
346 E1000_MRQC_EN_TCPIPV6EX(core
->mac
[MRQC
])) {
347 return E1000_MRQ_RSS_TYPE_IPV6TCPEX
;
350 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
&&
351 (core
->mac
[MRQC
] & E1000_MRQC_RSS_FIELD_IPV6_UDP
)) {
352 return E1000_MRQ_RSS_TYPE_IPV6UDP
;
355 if (E1000_MRQC_EN_IPV6EX(core
->mac
[MRQC
])) {
356 return E1000_MRQ_RSS_TYPE_IPV6EX
;
361 if (E1000_MRQC_EN_IPV6(core
->mac
[MRQC
])) {
362 return E1000_MRQ_RSS_TYPE_IPV6
;
367 return E1000_MRQ_RSS_TYPE_NONE
;
371 igb_rss_calc_hash(IGBCore
*core
, struct NetRxPkt
*pkt
, E1000E_RSSInfo
*info
)
373 NetRxPktRssType type
;
375 assert(igb_rss_enabled(core
));
377 switch (info
->type
) {
378 case E1000_MRQ_RSS_TYPE_IPV4
:
379 type
= NetPktRssIpV4
;
381 case E1000_MRQ_RSS_TYPE_IPV4TCP
:
382 type
= NetPktRssIpV4Tcp
;
384 case E1000_MRQ_RSS_TYPE_IPV6TCPEX
:
385 type
= NetPktRssIpV6TcpEx
;
387 case E1000_MRQ_RSS_TYPE_IPV6
:
388 type
= NetPktRssIpV6
;
390 case E1000_MRQ_RSS_TYPE_IPV6EX
:
391 type
= NetPktRssIpV6Ex
;
393 case E1000_MRQ_RSS_TYPE_IPV4UDP
:
394 type
= NetPktRssIpV4Udp
;
396 case E1000_MRQ_RSS_TYPE_IPV6UDP
:
397 type
= NetPktRssIpV6Udp
;
404 return net_rx_pkt_calc_rss_hash(pkt
, type
, (uint8_t *) &core
->mac
[RSSRK
]);
408 igb_rss_parse_packet(IGBCore
*core
, struct NetRxPkt
*pkt
, bool tx
,
409 E1000E_RSSInfo
*info
)
411 trace_e1000e_rx_rss_started();
413 if (tx
|| !igb_rss_enabled(core
)) {
414 info
->enabled
= false;
418 trace_e1000e_rx_rss_disabled();
422 info
->enabled
= true;
424 info
->type
= igb_rss_get_hash_type(core
, pkt
);
426 trace_e1000e_rx_rss_type(info
->type
);
428 if (info
->type
== E1000_MRQ_RSS_TYPE_NONE
) {
434 info
->hash
= igb_rss_calc_hash(core
, pkt
, info
);
435 info
->queue
= E1000_RSS_QUEUE(&core
->mac
[RETA
], info
->hash
);
439 igb_tx_insert_vlan(IGBCore
*core
, uint16_t qn
, struct igb_tx
*tx
,
440 uint16_t vlan
, bool insert_vlan
)
442 if (core
->mac
[MRQC
] & 1) {
443 uint16_t pool
= qn
% IGB_NUM_VM_POOLS
;
445 if (core
->mac
[VMVIR0
+ pool
] & E1000_VMVIR_VLANA_DEFAULT
) {
446 /* always insert default VLAN */
448 vlan
= core
->mac
[VMVIR0
+ pool
] & 0xffff;
449 } else if (core
->mac
[VMVIR0
+ pool
] & E1000_VMVIR_VLANA_NEVER
) {
455 net_tx_pkt_setup_vlan_header_ex(tx
->tx_pkt
, vlan
,
456 core
->mac
[VET
] & 0xffff);
461 igb_setup_tx_offloads(IGBCore
*core
, struct igb_tx
*tx
)
463 uint32_t idx
= (tx
->first_olinfo_status
>> 4) & 1;
465 if (tx
->first_cmd_type_len
& E1000_ADVTXD_DCMD_TSE
) {
466 uint32_t mss
= tx
->ctx
[idx
].mss_l4len_idx
>> E1000_ADVTXD_MSS_SHIFT
;
467 if (!net_tx_pkt_build_vheader(tx
->tx_pkt
, true, true, mss
)) {
471 net_tx_pkt_update_ip_checksums(tx
->tx_pkt
);
472 e1000x_inc_reg_if_not_full(core
->mac
, TSCTC
);
476 if ((tx
->first_olinfo_status
& E1000_ADVTXD_POTS_TXSM
) &&
477 !((tx
->ctx
[idx
].type_tucmd_mlhl
& E1000_ADVTXD_TUCMD_L4T_SCTP
) ?
478 net_tx_pkt_update_sctp_checksum(tx
->tx_pkt
) :
479 net_tx_pkt_build_vheader(tx
->tx_pkt
, false, true, 0))) {
483 if (tx
->first_olinfo_status
& E1000_ADVTXD_POTS_IXSM
) {
484 net_tx_pkt_update_ip_hdr_checksum(tx
->tx_pkt
);
490 static void igb_tx_pkt_mac_callback(void *core
,
491 const struct iovec
*iov
,
493 const struct iovec
*virt_iov
,
496 igb_receive_internal(core
, virt_iov
, virt_iovcnt
, true, NULL
);
499 static void igb_tx_pkt_vmdq_callback(void *opaque
,
500 const struct iovec
*iov
,
502 const struct iovec
*virt_iov
,
505 IGBTxPktVmdqCallbackContext
*context
= opaque
;
508 igb_receive_internal(context
->core
, virt_iov
, virt_iovcnt
, true,
512 if (context
->core
->has_vnet
) {
513 qemu_sendv_packet(context
->nc
, virt_iov
, virt_iovcnt
);
515 qemu_sendv_packet(context
->nc
, iov
, iovcnt
);
520 /* TX Packets Switching (7.10.3.6) */
521 static bool igb_tx_pkt_switch(IGBCore
*core
, struct igb_tx
*tx
,
524 IGBTxPktVmdqCallbackContext context
;
526 /* TX switching is only used to serve VM to VM traffic. */
527 if (!(core
->mac
[MRQC
] & 1)) {
531 /* TX switching requires DTXSWC.Loopback_en bit enabled. */
532 if (!(core
->mac
[DTXSWC
] & E1000_DTXSWC_VMDQ_LOOPBACK_EN
)) {
539 return net_tx_pkt_send_custom(tx
->tx_pkt
, false,
540 igb_tx_pkt_vmdq_callback
, &context
);
543 return net_tx_pkt_send(tx
->tx_pkt
, nc
);
547 igb_tx_pkt_send(IGBCore
*core
, struct igb_tx
*tx
, int queue_index
)
549 int target_queue
= MIN(core
->max_queue_num
, queue_index
);
550 NetClientState
*queue
= qemu_get_subqueue(core
->owner_nic
, target_queue
);
552 if (!igb_setup_tx_offloads(core
, tx
)) {
556 net_tx_pkt_dump(tx
->tx_pkt
);
558 if ((core
->phy
[MII_BMCR
] & MII_BMCR_LOOPBACK
) ||
559 ((core
->mac
[RCTL
] & E1000_RCTL_LBM_MAC
) == E1000_RCTL_LBM_MAC
)) {
560 return net_tx_pkt_send_custom(tx
->tx_pkt
, false,
561 igb_tx_pkt_mac_callback
, core
);
563 return igb_tx_pkt_switch(core
, tx
, queue
);
568 igb_on_tx_done_update_stats(IGBCore
*core
, struct NetTxPkt
*tx_pkt
, int qn
)
570 static const int PTCregs
[6] = { PTC64
, PTC127
, PTC255
, PTC511
,
573 size_t tot_len
= net_tx_pkt_get_total_len(tx_pkt
) + 4;
575 e1000x_increase_size_stats(core
->mac
, PTCregs
, tot_len
);
576 e1000x_inc_reg_if_not_full(core
->mac
, TPT
);
577 e1000x_grow_8reg_if_not_full(core
->mac
, TOTL
, tot_len
);
579 switch (net_tx_pkt_get_packet_type(tx_pkt
)) {
581 e1000x_inc_reg_if_not_full(core
->mac
, BPTC
);
584 e1000x_inc_reg_if_not_full(core
->mac
, MPTC
);
589 g_assert_not_reached();
592 e1000x_inc_reg_if_not_full(core
->mac
, GPTC
);
593 e1000x_grow_8reg_if_not_full(core
->mac
, GOTCL
, tot_len
);
595 if (core
->mac
[MRQC
] & 1) {
596 uint16_t pool
= qn
% IGB_NUM_VM_POOLS
;
598 core
->mac
[PVFGOTC0
+ (pool
* 64)] += tot_len
;
599 core
->mac
[PVFGPTC0
+ (pool
* 64)]++;
604 igb_process_tx_desc(IGBCore
*core
,
607 union e1000_adv_tx_desc
*tx_desc
,
610 struct e1000_adv_tx_context_desc
*tx_ctx_desc
;
611 uint32_t cmd_type_len
;
613 uint64_t buffer_addr
;
616 cmd_type_len
= le32_to_cpu(tx_desc
->read
.cmd_type_len
);
618 if (cmd_type_len
& E1000_ADVTXD_DCMD_DEXT
) {
619 if ((cmd_type_len
& E1000_ADVTXD_DTYP_DATA
) ==
620 E1000_ADVTXD_DTYP_DATA
) {
621 /* advanced transmit data descriptor */
623 tx
->first_cmd_type_len
= cmd_type_len
;
624 tx
->first_olinfo_status
= le32_to_cpu(tx_desc
->read
.olinfo_status
);
627 } else if ((cmd_type_len
& E1000_ADVTXD_DTYP_CTXT
) ==
628 E1000_ADVTXD_DTYP_CTXT
) {
629 /* advanced transmit context descriptor */
630 tx_ctx_desc
= (struct e1000_adv_tx_context_desc
*)tx_desc
;
631 idx
= (le32_to_cpu(tx_ctx_desc
->mss_l4len_idx
) >> 4) & 1;
632 tx
->ctx
[idx
].vlan_macip_lens
= le32_to_cpu(tx_ctx_desc
->vlan_macip_lens
);
633 tx
->ctx
[idx
].seqnum_seed
= le32_to_cpu(tx_ctx_desc
->seqnum_seed
);
634 tx
->ctx
[idx
].type_tucmd_mlhl
= le32_to_cpu(tx_ctx_desc
->type_tucmd_mlhl
);
635 tx
->ctx
[idx
].mss_l4len_idx
= le32_to_cpu(tx_ctx_desc
->mss_l4len_idx
);
638 /* unknown descriptor type */
642 /* legacy descriptor */
644 /* TODO: Implement a support for legacy descriptors (7.2.2.1). */
647 buffer_addr
= le64_to_cpu(tx_desc
->read
.buffer_addr
);
648 length
= cmd_type_len
& 0xFFFF;
651 if (!net_tx_pkt_add_raw_fragment_pci(tx
->tx_pkt
, dev
,
652 buffer_addr
, length
)) {
657 if (cmd_type_len
& E1000_TXD_CMD_EOP
) {
658 if (!tx
->skip_cp
&& net_tx_pkt_parse(tx
->tx_pkt
)) {
659 idx
= (tx
->first_olinfo_status
>> 4) & 1;
660 igb_tx_insert_vlan(core
, queue_index
, tx
,
661 tx
->ctx
[idx
].vlan_macip_lens
>> IGB_TX_FLAGS_VLAN_SHIFT
,
662 !!(tx
->first_cmd_type_len
& E1000_TXD_CMD_VLE
));
664 if ((tx
->first_cmd_type_len
& E1000_ADVTXD_MAC_TSTAMP
) &&
665 (core
->mac
[TSYNCTXCTL
] & E1000_TSYNCTXCTL_ENABLED
) &&
666 !(core
->mac
[TSYNCTXCTL
] & E1000_TSYNCTXCTL_VALID
)) {
667 core
->mac
[TSYNCTXCTL
] |= E1000_TSYNCTXCTL_VALID
;
668 e1000x_timestamp(core
->mac
, core
->timadj
, TXSTMPL
, TXSTMPH
);
671 if (igb_tx_pkt_send(core
, tx
, queue_index
)) {
672 igb_on_tx_done_update_stats(core
, tx
->tx_pkt
, queue_index
);
678 net_tx_pkt_reset(tx
->tx_pkt
, net_tx_pkt_unmap_frag_pci
, dev
);
682 static uint32_t igb_tx_wb_eic(IGBCore
*core
, int queue_idx
)
686 n
= igb_ivar_entry_tx(queue_idx
);
687 ent
= (core
->mac
[IVAR0
+ n
/ 4] >> (8 * (n
% 4))) & 0xff;
689 return (ent
& E1000_IVAR_VALID
) ? BIT(ent
& 0x1f) : 0;
692 static uint32_t igb_rx_wb_eic(IGBCore
*core
, int queue_idx
)
696 n
= igb_ivar_entry_rx(queue_idx
);
697 ent
= (core
->mac
[IVAR0
+ n
/ 4] >> (8 * (n
% 4))) & 0xff;
699 return (ent
& E1000_IVAR_VALID
) ? BIT(ent
& 0x1f) : 0;
703 igb_ring_empty(IGBCore
*core
, const E1000ERingInfo
*r
)
705 return core
->mac
[r
->dh
] == core
->mac
[r
->dt
] ||
706 core
->mac
[r
->dt
] >= core
->mac
[r
->dlen
] / E1000_RING_DESC_LEN
;
709 static inline uint64_t
710 igb_ring_base(IGBCore
*core
, const E1000ERingInfo
*r
)
712 uint64_t bah
= core
->mac
[r
->dbah
];
713 uint64_t bal
= core
->mac
[r
->dbal
];
715 return (bah
<< 32) + bal
;
718 static inline uint64_t
719 igb_ring_head_descr(IGBCore
*core
, const E1000ERingInfo
*r
)
721 return igb_ring_base(core
, r
) + E1000_RING_DESC_LEN
* core
->mac
[r
->dh
];
725 igb_ring_advance(IGBCore
*core
, const E1000ERingInfo
*r
, uint32_t count
)
727 core
->mac
[r
->dh
] += count
;
729 if (core
->mac
[r
->dh
] * E1000_RING_DESC_LEN
>= core
->mac
[r
->dlen
]) {
730 core
->mac
[r
->dh
] = 0;
734 static inline uint32_t
735 igb_ring_free_descr_num(IGBCore
*core
, const E1000ERingInfo
*r
)
737 trace_e1000e_ring_free_space(r
->idx
, core
->mac
[r
->dlen
],
738 core
->mac
[r
->dh
], core
->mac
[r
->dt
]);
740 if (core
->mac
[r
->dh
] <= core
->mac
[r
->dt
]) {
741 return core
->mac
[r
->dt
] - core
->mac
[r
->dh
];
744 if (core
->mac
[r
->dh
] > core
->mac
[r
->dt
]) {
745 return core
->mac
[r
->dlen
] / E1000_RING_DESC_LEN
+
746 core
->mac
[r
->dt
] - core
->mac
[r
->dh
];
749 g_assert_not_reached();
754 igb_ring_enabled(IGBCore
*core
, const E1000ERingInfo
*r
)
756 return core
->mac
[r
->dlen
] > 0;
759 typedef struct IGB_TxRing_st
{
760 const E1000ERingInfo
*i
;
765 igb_mq_queue_idx(int base_reg_idx
, int reg_idx
)
767 return (reg_idx
- base_reg_idx
) / 16;
771 igb_tx_ring_init(IGBCore
*core
, IGB_TxRing
*txr
, int idx
)
773 static const E1000ERingInfo i
[IGB_NUM_QUEUES
] = {
774 { TDBAH0
, TDBAL0
, TDLEN0
, TDH0
, TDT0
, 0 },
775 { TDBAH1
, TDBAL1
, TDLEN1
, TDH1
, TDT1
, 1 },
776 { TDBAH2
, TDBAL2
, TDLEN2
, TDH2
, TDT2
, 2 },
777 { TDBAH3
, TDBAL3
, TDLEN3
, TDH3
, TDT3
, 3 },
778 { TDBAH4
, TDBAL4
, TDLEN4
, TDH4
, TDT4
, 4 },
779 { TDBAH5
, TDBAL5
, TDLEN5
, TDH5
, TDT5
, 5 },
780 { TDBAH6
, TDBAL6
, TDLEN6
, TDH6
, TDT6
, 6 },
781 { TDBAH7
, TDBAL7
, TDLEN7
, TDH7
, TDT7
, 7 },
782 { TDBAH8
, TDBAL8
, TDLEN8
, TDH8
, TDT8
, 8 },
783 { TDBAH9
, TDBAL9
, TDLEN9
, TDH9
, TDT9
, 9 },
784 { TDBAH10
, TDBAL10
, TDLEN10
, TDH10
, TDT10
, 10 },
785 { TDBAH11
, TDBAL11
, TDLEN11
, TDH11
, TDT11
, 11 },
786 { TDBAH12
, TDBAL12
, TDLEN12
, TDH12
, TDT12
, 12 },
787 { TDBAH13
, TDBAL13
, TDLEN13
, TDH13
, TDT13
, 13 },
788 { TDBAH14
, TDBAL14
, TDLEN14
, TDH14
, TDT14
, 14 },
789 { TDBAH15
, TDBAL15
, TDLEN15
, TDH15
, TDT15
, 15 }
792 assert(idx
< ARRAY_SIZE(i
));
795 txr
->tx
= &core
->tx
[idx
];
798 typedef struct E1000E_RxRing_st
{
799 const E1000ERingInfo
*i
;
803 igb_rx_ring_init(IGBCore
*core
, E1000E_RxRing
*rxr
, int idx
)
805 static const E1000ERingInfo i
[IGB_NUM_QUEUES
] = {
806 { RDBAH0
, RDBAL0
, RDLEN0
, RDH0
, RDT0
, 0 },
807 { RDBAH1
, RDBAL1
, RDLEN1
, RDH1
, RDT1
, 1 },
808 { RDBAH2
, RDBAL2
, RDLEN2
, RDH2
, RDT2
, 2 },
809 { RDBAH3
, RDBAL3
, RDLEN3
, RDH3
, RDT3
, 3 },
810 { RDBAH4
, RDBAL4
, RDLEN4
, RDH4
, RDT4
, 4 },
811 { RDBAH5
, RDBAL5
, RDLEN5
, RDH5
, RDT5
, 5 },
812 { RDBAH6
, RDBAL6
, RDLEN6
, RDH6
, RDT6
, 6 },
813 { RDBAH7
, RDBAL7
, RDLEN7
, RDH7
, RDT7
, 7 },
814 { RDBAH8
, RDBAL8
, RDLEN8
, RDH8
, RDT8
, 8 },
815 { RDBAH9
, RDBAL9
, RDLEN9
, RDH9
, RDT9
, 9 },
816 { RDBAH10
, RDBAL10
, RDLEN10
, RDH10
, RDT10
, 10 },
817 { RDBAH11
, RDBAL11
, RDLEN11
, RDH11
, RDT11
, 11 },
818 { RDBAH12
, RDBAL12
, RDLEN12
, RDH12
, RDT12
, 12 },
819 { RDBAH13
, RDBAL13
, RDLEN13
, RDH13
, RDT13
, 13 },
820 { RDBAH14
, RDBAL14
, RDLEN14
, RDH14
, RDT14
, 14 },
821 { RDBAH15
, RDBAL15
, RDLEN15
, RDH15
, RDT15
, 15 }
824 assert(idx
< ARRAY_SIZE(i
));
830 igb_txdesc_writeback(IGBCore
*core
, dma_addr_t base
,
831 union e1000_adv_tx_desc
*tx_desc
,
832 const E1000ERingInfo
*txi
)
835 uint32_t cmd_type_len
= le32_to_cpu(tx_desc
->read
.cmd_type_len
);
838 tdwba
= core
->mac
[E1000_TDWBAL(txi
->idx
) >> 2];
839 tdwba
|= (uint64_t)core
->mac
[E1000_TDWBAH(txi
->idx
) >> 2] << 32;
841 if (!(cmd_type_len
& E1000_TXD_CMD_RS
)) {
845 d
= pcie_sriov_get_vf_at_index(core
->owner
, txi
->idx
% 8);
851 uint32_t buffer
= cpu_to_le32(core
->mac
[txi
->dh
]);
852 pci_dma_write(d
, tdwba
& ~3, &buffer
, sizeof(buffer
));
854 uint32_t status
= le32_to_cpu(tx_desc
->wb
.status
) | E1000_TXD_STAT_DD
;
856 tx_desc
->wb
.status
= cpu_to_le32(status
);
857 pci_dma_write(d
, base
+ offsetof(union e1000_adv_tx_desc
, wb
),
858 &tx_desc
->wb
, sizeof(tx_desc
->wb
));
861 return igb_tx_wb_eic(core
, txi
->idx
);
865 igb_tx_enabled(IGBCore
*core
, const E1000ERingInfo
*txi
)
867 bool vmdq
= core
->mac
[MRQC
] & 1;
868 uint16_t qn
= txi
->idx
;
869 uint16_t pool
= qn
% IGB_NUM_VM_POOLS
;
871 return (core
->mac
[TCTL
] & E1000_TCTL_EN
) &&
872 (!vmdq
|| core
->mac
[VFTE
] & BIT(pool
)) &&
873 (core
->mac
[TXDCTL0
+ (qn
* 16)] & E1000_TXDCTL_QUEUE_ENABLE
);
877 igb_start_xmit(IGBCore
*core
, const IGB_TxRing
*txr
)
881 union e1000_adv_tx_desc desc
;
882 const E1000ERingInfo
*txi
= txr
->i
;
885 if (!igb_tx_enabled(core
, txi
)) {
886 trace_e1000e_tx_disabled();
890 d
= pcie_sriov_get_vf_at_index(core
->owner
, txi
->idx
% 8);
895 while (!igb_ring_empty(core
, txi
)) {
896 base
= igb_ring_head_descr(core
, txi
);
898 pci_dma_read(d
, base
, &desc
, sizeof(desc
));
900 trace_e1000e_tx_descr((void *)(intptr_t)desc
.read
.buffer_addr
,
901 desc
.read
.cmd_type_len
, desc
.wb
.status
);
903 igb_process_tx_desc(core
, d
, txr
->tx
, &desc
, txi
->idx
);
904 igb_ring_advance(core
, txi
, 1);
905 eic
|= igb_txdesc_writeback(core
, base
, &desc
, txi
);
909 igb_raise_interrupts(core
, EICR
, eic
);
910 igb_raise_interrupts(core
, ICR
, E1000_ICR_TXDW
);
913 net_tx_pkt_reset(txr
->tx
->tx_pkt
, net_tx_pkt_unmap_frag_pci
, d
);
917 igb_rxbufsize(IGBCore
*core
, const E1000ERingInfo
*r
)
919 uint32_t srrctl
= core
->mac
[E1000_SRRCTL(r
->idx
) >> 2];
920 uint32_t bsizepkt
= srrctl
& E1000_SRRCTL_BSIZEPKT_MASK
;
922 return bsizepkt
<< E1000_SRRCTL_BSIZEPKT_SHIFT
;
925 return e1000x_rxbufsize(core
->mac
[RCTL
]);
929 igb_has_rxbufs(IGBCore
*core
, const E1000ERingInfo
*r
, size_t total_size
)
931 uint32_t bufs
= igb_ring_free_descr_num(core
, r
);
932 uint32_t bufsize
= igb_rxbufsize(core
, r
);
934 trace_e1000e_rx_has_buffers(r
->idx
, bufs
, total_size
, bufsize
);
936 return total_size
<= bufs
/ (core
->rx_desc_len
/ E1000_MIN_RX_DESC_LEN
) *
941 igb_rxhdrbufsize(IGBCore
*core
, const E1000ERingInfo
*r
)
943 uint32_t srrctl
= core
->mac
[E1000_SRRCTL(r
->idx
) >> 2];
944 return (srrctl
& E1000_SRRCTL_BSIZEHDRSIZE_MASK
) >>
945 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT
;
949 igb_start_recv(IGBCore
*core
)
953 trace_e1000e_rx_start_recv();
955 for (i
= 0; i
<= core
->max_queue_num
; i
++) {
956 qemu_flush_queued_packets(qemu_get_subqueue(core
->owner_nic
, i
));
961 igb_can_receive(IGBCore
*core
)
965 if (!e1000x_rx_ready(core
->owner
, core
->mac
)) {
969 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
971 if (!(core
->mac
[RXDCTL0
+ (i
* 16)] & E1000_RXDCTL_QUEUE_ENABLE
)) {
975 igb_rx_ring_init(core
, &rxr
, i
);
976 if (igb_ring_enabled(core
, rxr
.i
) && igb_has_rxbufs(core
, rxr
.i
, 1)) {
977 trace_e1000e_rx_can_recv();
982 trace_e1000e_rx_can_recv_rings_full();
987 igb_receive(IGBCore
*core
, const uint8_t *buf
, size_t size
)
989 const struct iovec iov
= {
990 .iov_base
= (uint8_t *)buf
,
994 return igb_receive_iov(core
, &iov
, 1);
998 igb_rx_l3_cso_enabled(IGBCore
*core
)
1000 return !!(core
->mac
[RXCSUM
] & E1000_RXCSUM_IPOFLD
);
1004 igb_rx_l4_cso_enabled(IGBCore
*core
)
1006 return !!(core
->mac
[RXCSUM
] & E1000_RXCSUM_TUOFLD
);
1009 static bool igb_rx_is_oversized(IGBCore
*core
, const struct eth_header
*ehdr
,
1010 size_t size
, size_t vlan_num
,
1011 bool lpe
, uint16_t rlpml
)
1013 size_t vlan_header_size
= sizeof(struct vlan_header
) * vlan_num
;
1014 size_t header_size
= sizeof(struct eth_header
) + vlan_header_size
;
1015 return lpe
? size
+ ETH_FCS_LEN
> rlpml
: size
> header_size
+ ETH_MTU
;
1018 static uint16_t igb_receive_assign(IGBCore
*core
, const struct iovec
*iov
,
1019 size_t iovcnt
, size_t iov_ofs
,
1020 const L2Header
*l2_header
, size_t size
,
1021 E1000E_RSSInfo
*rss_info
,
1022 uint16_t *etqf
, bool *ts
, bool *external_tx
)
1024 static const int ta_shift
[] = { 4, 3, 2, 0 };
1025 const struct eth_header
*ehdr
= &l2_header
->eth
;
1026 uint32_t f
, ra
[2], *macp
, rctl
= core
->mac
[RCTL
];
1027 uint16_t queues
= 0;
1028 uint16_t oversized
= 0;
1029 size_t vlan_num
= 0;
1035 memset(rss_info
, 0, sizeof(E1000E_RSSInfo
));
1039 *external_tx
= true;
1042 if (core
->mac
[CTRL_EXT
] & BIT(26)) {
1043 if (be16_to_cpu(ehdr
->h_proto
) == core
->mac
[VET
] >> 16 &&
1044 be16_to_cpu(l2_header
->vlan
[0].h_proto
) == (core
->mac
[VET
] & 0xffff)) {
1048 if (be16_to_cpu(ehdr
->h_proto
) == (core
->mac
[VET
] & 0xffff)) {
1053 lpe
= !!(core
->mac
[RCTL
] & E1000_RCTL_LPE
);
1054 rlpml
= core
->mac
[RLPML
];
1055 if (!(core
->mac
[RCTL
] & E1000_RCTL_SBP
) &&
1056 igb_rx_is_oversized(core
, ehdr
, size
, vlan_num
, lpe
, rlpml
)) {
1057 trace_e1000x_rx_oversized(size
);
1061 for (*etqf
= 0; *etqf
< 8; (*etqf
)++) {
1062 if ((core
->mac
[ETQF0
+ *etqf
] & E1000_ETQF_FILTER_ENABLE
) &&
1063 be16_to_cpu(ehdr
->h_proto
) == (core
->mac
[ETQF0
+ *etqf
] & E1000_ETQF_ETYPE_MASK
)) {
1064 if ((core
->mac
[ETQF0
+ *etqf
] & E1000_ETQF_1588
) &&
1065 (core
->mac
[TSYNCRXCTL
] & E1000_TSYNCRXCTL_ENABLED
) &&
1066 !(core
->mac
[TSYNCRXCTL
] & E1000_TSYNCRXCTL_VALID
) &&
1067 iov_to_buf(iov
, iovcnt
, iov_ofs
+ ETH_HLEN
, &ptp2
, sizeof(ptp2
)) >= sizeof(ptp2
) &&
1068 (ptp2
.version_ptp
& 15) == 2 &&
1069 ptp2
.message_id_transport_specific
== ((core
->mac
[TSYNCRXCFG
] >> 8) & 255)) {
1070 e1000x_timestamp(core
->mac
, core
->timadj
, RXSTMPL
, RXSTMPH
);
1072 core
->mac
[TSYNCRXCTL
] |= E1000_TSYNCRXCTL_VALID
;
1073 core
->mac
[RXSATRL
] = le32_to_cpu(ptp2
.source_uuid_lo
);
1074 core
->mac
[RXSATRH
] = le16_to_cpu(ptp2
.source_uuid_hi
) |
1075 (le16_to_cpu(ptp2
.sequence_id
) << 16);
1082 !e1000x_rx_vlan_filter(core
->mac
, l2_header
->vlan
+ vlan_num
- 1)) {
1086 if (core
->mac
[MRQC
] & 1) {
1087 if (is_broadcast_ether_addr(ehdr
->h_dest
)) {
1088 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1089 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_BAM
) {
1094 for (macp
= core
->mac
+ RA
; macp
< core
->mac
+ RA
+ 32; macp
+= 2) {
1095 if (!(macp
[1] & E1000_RAH_AV
)) {
1098 ra
[0] = cpu_to_le32(macp
[0]);
1099 ra
[1] = cpu_to_le32(macp
[1]);
1100 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1101 queues
|= (macp
[1] & E1000_RAH_POOL_MASK
) / E1000_RAH_POOL_1
;
1105 for (macp
= core
->mac
+ RA2
; macp
< core
->mac
+ RA2
+ 16; macp
+= 2) {
1106 if (!(macp
[1] & E1000_RAH_AV
)) {
1109 ra
[0] = cpu_to_le32(macp
[0]);
1110 ra
[1] = cpu_to_le32(macp
[1]);
1111 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1112 queues
|= (macp
[1] & E1000_RAH_POOL_MASK
) / E1000_RAH_POOL_1
;
1117 macp
= core
->mac
+ (is_multicast_ether_addr(ehdr
->h_dest
) ? MTA
: UTA
);
1119 f
= ta_shift
[(rctl
>> E1000_RCTL_MO_SHIFT
) & 3];
1120 f
= (((ehdr
->h_dest
[5] << 8) | ehdr
->h_dest
[4]) >> f
) & 0xfff;
1121 if (macp
[f
>> 5] & (1 << (f
& 0x1f))) {
1122 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1123 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_ROMPE
) {
1128 } else if (is_unicast_ether_addr(ehdr
->h_dest
) && external_tx
) {
1129 *external_tx
= false;
1133 if (e1000x_vlan_rx_filter_enabled(core
->mac
)) {
1137 uint16_t vid
= be16_to_cpu(l2_header
->vlan
[vlan_num
- 1].h_tci
) & VLAN_VID_MASK
;
1139 for (i
= 0; i
< E1000_VLVF_ARRAY_SIZE
; i
++) {
1140 if ((core
->mac
[VLVF0
+ i
] & E1000_VLVF_VLANID_MASK
) == vid
&&
1141 (core
->mac
[VLVF0
+ i
] & E1000_VLVF_VLANID_ENABLE
)) {
1142 uint32_t poolsel
= core
->mac
[VLVF0
+ i
] & E1000_VLVF_POOLSEL_MASK
;
1143 mask
|= poolsel
>> E1000_VLVF_POOLSEL_SHIFT
;
1147 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1148 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_AUPE
) {
1157 if (is_unicast_ether_addr(ehdr
->h_dest
) && !queues
&& !external_tx
&&
1158 !(core
->mac
[VT_CTL
] & E1000_VT_CTL_DISABLE_DEF_POOL
)) {
1159 uint32_t def_pl
= core
->mac
[VT_CTL
] & E1000_VT_CTL_DEFAULT_POOL_MASK
;
1160 queues
= BIT(def_pl
>> E1000_VT_CTL_DEFAULT_POOL_SHIFT
);
1163 queues
&= core
->mac
[VFRE
];
1165 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1166 lpe
= !!(core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_LPE
);
1167 rlpml
= core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_RLPML_MASK
;
1168 if ((queues
& BIT(i
)) &&
1169 igb_rx_is_oversized(core
, ehdr
, size
, vlan_num
,
1171 oversized
|= BIT(i
);
1174 /* 8.19.37 increment ROC if packet is oversized for all queues */
1175 if (oversized
== queues
) {
1176 trace_e1000x_rx_oversized(size
);
1177 e1000x_inc_reg_if_not_full(core
->mac
, ROC
);
1179 queues
&= ~oversized
;
1183 igb_rss_parse_packet(core
, core
->rx_pkt
,
1184 external_tx
!= NULL
, rss_info
);
1185 /* Sec 8.26.1: PQn = VFn + VQn*8 */
1186 if (rss_info
->queue
& 1) {
1187 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1188 if ((queues
& BIT(i
)) &&
1189 (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_RSSE
)) {
1190 queues
|= BIT(i
+ IGB_NUM_VM_POOLS
);
1197 bool accepted
= e1000x_rx_group_filter(core
->mac
, ehdr
);
1199 for (macp
= core
->mac
+ RA2
; macp
< core
->mac
+ RA2
+ 16; macp
+= 2) {
1200 if (!(macp
[1] & E1000_RAH_AV
)) {
1203 ra
[0] = cpu_to_le32(macp
[0]);
1204 ra
[1] = cpu_to_le32(macp
[1]);
1205 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1206 trace_e1000x_rx_flt_ucast_match((int)(macp
- core
->mac
- RA2
) / 2,
1207 MAC_ARG(ehdr
->h_dest
));
1216 igb_rss_parse_packet(core
, core
->rx_pkt
, false, rss_info
);
1217 queues
= BIT(rss_info
->queue
);
1225 igb_read_lgcy_rx_descr(IGBCore
*core
, struct e1000_rx_desc
*desc
,
1228 *buff_addr
= le64_to_cpu(desc
->buffer_addr
);
1232 igb_read_adv_rx_single_buf_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1235 *buff_addr
= le64_to_cpu(desc
->read
.pkt_addr
);
1239 igb_read_adv_rx_split_buf_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1242 buff_addr
[0] = le64_to_cpu(desc
->read
.hdr_addr
);
1243 buff_addr
[1] = le64_to_cpu(desc
->read
.pkt_addr
);
1246 typedef struct IGBBAState
{
1247 uint16_t written
[IGB_MAX_PS_BUFFERS
];
1251 typedef struct IGBSplitDescriptorData
{
1255 } IGBSplitDescriptorData
;
1257 typedef struct IGBPacketRxDMAState
{
1263 uint32_t rx_desc_packet_buf_size
;
1264 uint32_t rx_desc_header_buf_size
;
1270 hwaddr ba
[IGB_MAX_PS_BUFFERS
];
1271 IGBSplitDescriptorData ps_desc_data
;
1272 } IGBPacketRxDMAState
;
1275 igb_read_rx_descr(IGBCore
*core
,
1276 union e1000_rx_desc_union
*desc
,
1277 IGBPacketRxDMAState
*pdma_st
,
1278 const E1000ERingInfo
*r
)
1282 if (igb_rx_use_legacy_descriptor(core
)) {
1283 igb_read_lgcy_rx_descr(core
, &desc
->legacy
, &pdma_st
->ba
[1]);
1288 /* advanced header split descriptor */
1289 if (igb_rx_use_ps_descriptor(core
, r
)) {
1290 igb_read_adv_rx_split_buf_descr(core
, &desc
->adv
, &pdma_st
->ba
[0]);
1294 /* descriptor replication modes not supported */
1295 desc_type
= igb_rx_queue_desctyp_get(core
, r
);
1296 if (desc_type
!= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF
) {
1297 trace_igb_wrn_rx_desc_modes_not_supp(desc_type
);
1300 /* advanced single buffer descriptor */
1301 igb_read_adv_rx_single_buf_descr(core
, &desc
->adv
, &pdma_st
->ba
[1]);
1306 igb_verify_csum_in_sw(IGBCore
*core
,
1307 struct NetRxPkt
*pkt
,
1308 uint32_t *status_flags
,
1309 EthL4HdrProto l4hdr_proto
)
1312 uint32_t csum_error
;
1314 if (igb_rx_l3_cso_enabled(core
)) {
1315 if (!net_rx_pkt_validate_l3_csum(pkt
, &csum_valid
)) {
1316 trace_e1000e_rx_metadata_l3_csum_validation_failed();
1318 csum_error
= csum_valid
? 0 : E1000_RXDEXT_STATERR_IPE
;
1319 *status_flags
|= E1000_RXD_STAT_IPCS
| csum_error
;
1322 trace_e1000e_rx_metadata_l3_cso_disabled();
1325 if (!igb_rx_l4_cso_enabled(core
)) {
1326 trace_e1000e_rx_metadata_l4_cso_disabled();
1330 if (!net_rx_pkt_validate_l4_csum(pkt
, &csum_valid
)) {
1331 trace_e1000e_rx_metadata_l4_csum_validation_failed();
1335 csum_error
= csum_valid
? 0 : E1000_RXDEXT_STATERR_TCPE
;
1336 *status_flags
|= E1000_RXD_STAT_TCPCS
| csum_error
;
1338 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
) {
1339 *status_flags
|= E1000_RXD_STAT_UDPCS
;
1344 igb_build_rx_metadata_common(IGBCore
*core
,
1345 struct NetRxPkt
*pkt
,
1347 uint32_t *status_flags
,
1350 struct virtio_net_hdr
*vhdr
;
1351 bool hasip4
, hasip6
, csum_valid
;
1352 EthL4HdrProto l4hdr_proto
;
1354 *status_flags
= E1000_RXD_STAT_DD
;
1356 /* No additional metadata needed for non-EOP descriptors */
1361 *status_flags
|= E1000_RXD_STAT_EOP
;
1363 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1364 trace_e1000e_rx_metadata_protocols(hasip4
, hasip6
, l4hdr_proto
);
1367 if (net_rx_pkt_is_vlan_stripped(pkt
)) {
1368 *status_flags
|= E1000_RXD_STAT_VP
;
1369 *vlan_tag
= cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt
));
1370 trace_e1000e_rx_metadata_vlan(*vlan_tag
);
1373 /* RX CSO information */
1374 if (hasip6
&& (core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_XSUM_DIS
)) {
1375 trace_e1000e_rx_metadata_ipv6_sum_disabled();
1379 vhdr
= net_rx_pkt_get_vhdr(pkt
);
1381 if (!(vhdr
->flags
& VIRTIO_NET_HDR_F_DATA_VALID
) &&
1382 !(vhdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
)) {
1383 trace_e1000e_rx_metadata_virthdr_no_csum_info();
1384 igb_verify_csum_in_sw(core
, pkt
, status_flags
, l4hdr_proto
);
1388 if (igb_rx_l3_cso_enabled(core
)) {
1389 *status_flags
|= hasip4
? E1000_RXD_STAT_IPCS
: 0;
1391 trace_e1000e_rx_metadata_l3_cso_disabled();
1394 if (igb_rx_l4_cso_enabled(core
)) {
1395 switch (l4hdr_proto
) {
1396 case ETH_L4_HDR_PROTO_SCTP
:
1397 if (!net_rx_pkt_validate_l4_csum(pkt
, &csum_valid
)) {
1398 trace_e1000e_rx_metadata_l4_csum_validation_failed();
1402 *status_flags
|= E1000_RXDEXT_STATERR_TCPE
;
1405 case ETH_L4_HDR_PROTO_TCP
:
1406 *status_flags
|= E1000_RXD_STAT_TCPCS
;
1409 case ETH_L4_HDR_PROTO_UDP
:
1410 *status_flags
|= E1000_RXD_STAT_TCPCS
| E1000_RXD_STAT_UDPCS
;
1417 trace_e1000e_rx_metadata_l4_cso_disabled();
1421 trace_e1000e_rx_metadata_status_flags(*status_flags
);
1422 *status_flags
= cpu_to_le32(*status_flags
);
1426 igb_write_lgcy_rx_descr(IGBCore
*core
, struct e1000_rx_desc
*desc
,
1427 struct NetRxPkt
*pkt
,
1428 const E1000E_RSSInfo
*rss_info
,
1431 uint32_t status_flags
;
1433 assert(!rss_info
->enabled
);
1435 memset(desc
, 0, sizeof(*desc
));
1436 desc
->length
= cpu_to_le16(length
);
1437 igb_build_rx_metadata_common(core
, pkt
, pkt
!= NULL
,
1441 desc
->errors
= (uint8_t) (le32_to_cpu(status_flags
) >> 24);
1442 desc
->status
= (uint8_t) le32_to_cpu(status_flags
);
1446 igb_rx_ps_descriptor_split_always(IGBCore
*core
, const E1000ERingInfo
*r
)
1448 uint32_t desctyp
= igb_rx_queue_desctyp_get(core
, r
);
1449 return desctyp
== E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS
;
1453 igb_rx_desc_get_packet_type(IGBCore
*core
, struct NetRxPkt
*pkt
, uint16_t etqf
)
1456 bool hasip4
, hasip6
;
1457 EthL4HdrProto l4hdr_proto
;
1460 pkt_type
= BIT(11) | etqf
;
1464 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1466 if (hasip6
&& !(core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_DIS
)) {
1467 eth_ip6_hdr_info
*ip6hdr_info
= net_rx_pkt_get_ip6_info(pkt
);
1468 pkt_type
= ip6hdr_info
->has_ext_hdrs
? E1000_ADVRXD_PKT_IP6E
:
1469 E1000_ADVRXD_PKT_IP6
;
1470 } else if (hasip4
) {
1471 pkt_type
= E1000_ADVRXD_PKT_IP4
;
1476 switch (l4hdr_proto
) {
1477 case ETH_L4_HDR_PROTO_TCP
:
1478 pkt_type
|= E1000_ADVRXD_PKT_TCP
;
1480 case ETH_L4_HDR_PROTO_UDP
:
1481 pkt_type
|= E1000_ADVRXD_PKT_UDP
;
1483 case ETH_L4_HDR_PROTO_SCTP
:
1484 pkt_type
|= E1000_ADVRXD_PKT_SCTP
;
1494 igb_write_adv_rx_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1495 struct NetRxPkt
*pkt
,
1496 const E1000E_RSSInfo
*rss_info
, uint16_t etqf
, bool ts
,
1499 bool hasip4
, hasip6
;
1500 EthL4HdrProto l4hdr_proto
;
1501 uint16_t rss_type
= 0, pkt_type
;
1502 bool eop
= (pkt
!= NULL
);
1503 uint32_t adv_desc_status_error
= 0;
1504 memset(&desc
->wb
, 0, sizeof(desc
->wb
));
1506 desc
->wb
.upper
.length
= cpu_to_le16(length
);
1507 igb_build_rx_metadata_common(core
, pkt
, eop
,
1508 &desc
->wb
.upper
.status_error
,
1509 &desc
->wb
.upper
.vlan
);
1515 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1517 if ((core
->mac
[RXCSUM
] & E1000_RXCSUM_PCSD
) != 0) {
1518 if (rss_info
->enabled
) {
1519 desc
->wb
.lower
.hi_dword
.rss
= cpu_to_le32(rss_info
->hash
);
1520 rss_type
= rss_info
->type
;
1521 trace_igb_rx_metadata_rss(desc
->wb
.lower
.hi_dword
.rss
, rss_type
);
1523 } else if (hasip4
) {
1524 adv_desc_status_error
|= E1000_RXD_STAT_IPIDV
;
1525 desc
->wb
.lower
.hi_dword
.csum_ip
.ip_id
=
1526 cpu_to_le16(net_rx_pkt_get_ip_id(pkt
));
1527 trace_e1000e_rx_metadata_ip_id(
1528 desc
->wb
.lower
.hi_dword
.csum_ip
.ip_id
);
1532 adv_desc_status_error
|= BIT(16);
1535 pkt_type
= igb_rx_desc_get_packet_type(core
, pkt
, etqf
);
1536 trace_e1000e_rx_metadata_pkt_type(pkt_type
);
1537 desc
->wb
.lower
.lo_dword
.pkt_info
= cpu_to_le16(rss_type
| (pkt_type
<< 4));
1538 desc
->wb
.upper
.status_error
|= cpu_to_le32(adv_desc_status_error
);
1542 igb_write_adv_ps_rx_descr(IGBCore
*core
,
1543 union e1000_adv_rx_desc
*desc
,
1544 struct NetRxPkt
*pkt
,
1545 const E1000E_RSSInfo
*rss_info
,
1546 const E1000ERingInfo
*r
,
1549 IGBPacketRxDMAState
*pdma_st
)
1552 uint16_t hdr_info
= 0;
1554 if (pdma_st
->do_ps
) {
1555 pkt_len
= pdma_st
->bastate
.written
[1];
1557 pkt_len
= pdma_st
->bastate
.written
[0] + pdma_st
->bastate
.written
[1];
1560 igb_write_adv_rx_descr(core
, desc
, pkt
, rss_info
, etqf
, ts
, pkt_len
);
1562 hdr_info
= (pdma_st
->ps_desc_data
.hdr_len
<< E1000_ADVRXD_HDR_LEN_OFFSET
) &
1563 E1000_ADVRXD_ADV_HDR_LEN_MASK
;
1564 hdr_info
|= pdma_st
->ps_desc_data
.sph
? E1000_ADVRXD_HDR_SPH
: 0;
1565 desc
->wb
.lower
.lo_dword
.hdr_info
= cpu_to_le16(hdr_info
);
1567 desc
->wb
.upper
.status_error
|= cpu_to_le32(
1568 pdma_st
->ps_desc_data
.hbo
? E1000_ADVRXD_ST_ERR_HBO_OFFSET
: 0);
1572 igb_write_rx_descr(IGBCore
*core
,
1573 union e1000_rx_desc_union
*desc
,
1574 struct NetRxPkt
*pkt
,
1575 const E1000E_RSSInfo
*rss_info
,
1578 IGBPacketRxDMAState
*pdma_st
,
1579 const E1000ERingInfo
*r
)
1581 if (igb_rx_use_legacy_descriptor(core
)) {
1582 igb_write_lgcy_rx_descr(core
, &desc
->legacy
, pkt
, rss_info
,
1583 pdma_st
->bastate
.written
[1]);
1584 } else if (igb_rx_use_ps_descriptor(core
, r
)) {
1585 igb_write_adv_ps_rx_descr(core
, &desc
->adv
, pkt
, rss_info
, r
, etqf
, ts
,
1588 igb_write_adv_rx_descr(core
, &desc
->adv
, pkt
, rss_info
,
1589 etqf
, ts
, pdma_st
->bastate
.written
[1]);
1594 igb_pci_dma_write_rx_desc(IGBCore
*core
, PCIDevice
*dev
, dma_addr_t addr
,
1595 union e1000_rx_desc_union
*desc
, dma_addr_t len
)
1597 if (igb_rx_use_legacy_descriptor(core
)) {
1598 struct e1000_rx_desc
*d
= &desc
->legacy
;
1599 size_t offset
= offsetof(struct e1000_rx_desc
, status
);
1600 uint8_t status
= d
->status
;
1602 d
->status
&= ~E1000_RXD_STAT_DD
;
1603 pci_dma_write(dev
, addr
, desc
, len
);
1605 if (status
& E1000_RXD_STAT_DD
) {
1607 pci_dma_write(dev
, addr
+ offset
, &status
, sizeof(status
));
1610 union e1000_adv_rx_desc
*d
= &desc
->adv
;
1612 offsetof(union e1000_adv_rx_desc
, wb
.upper
.status_error
);
1613 uint32_t status
= d
->wb
.upper
.status_error
;
1615 d
->wb
.upper
.status_error
&= ~E1000_RXD_STAT_DD
;
1616 pci_dma_write(dev
, addr
, desc
, len
);
1618 if (status
& E1000_RXD_STAT_DD
) {
1619 d
->wb
.upper
.status_error
= status
;
1620 pci_dma_write(dev
, addr
+ offset
, &status
, sizeof(status
));
1626 igb_update_rx_stats(IGBCore
*core
, const E1000ERingInfo
*rxi
,
1627 size_t pkt_size
, size_t pkt_fcs_size
)
1629 eth_pkt_types_e pkt_type
= net_rx_pkt_get_packet_type(core
->rx_pkt
);
1630 e1000x_update_rx_total_stats(core
->mac
, pkt_type
, pkt_size
, pkt_fcs_size
);
1632 if (core
->mac
[MRQC
] & 1) {
1633 uint16_t pool
= rxi
->idx
% IGB_NUM_VM_POOLS
;
1635 core
->mac
[PVFGORC0
+ (pool
* 64)] += pkt_size
+ 4;
1636 core
->mac
[PVFGPRC0
+ (pool
* 64)]++;
1637 if (pkt_type
== ETH_PKT_MCAST
) {
1638 core
->mac
[PVFMPRC0
+ (pool
* 64)]++;
1644 igb_rx_descr_threshold_hit(IGBCore
*core
, const E1000ERingInfo
*rxi
)
1646 return igb_ring_free_descr_num(core
, rxi
) ==
1647 ((core
->mac
[E1000_SRRCTL(rxi
->idx
) >> 2] >> 20) & 31) * 16;
1651 igb_do_ps(IGBCore
*core
,
1652 const E1000ERingInfo
*r
,
1653 struct NetRxPkt
*pkt
,
1654 IGBPacketRxDMAState
*pdma_st
)
1656 bool hasip4
, hasip6
;
1657 EthL4HdrProto l4hdr_proto
;
1660 size_t bheader_size
;
1661 size_t total_pkt_len
;
1663 if (!igb_rx_use_ps_descriptor(core
, r
)) {
1667 total_pkt_len
= net_rx_pkt_get_total_len(pkt
);
1668 bheader_size
= igb_rxhdrbufsize(core
, r
);
1669 split_always
= igb_rx_ps_descriptor_split_always(core
, r
);
1670 if (split_always
&& total_pkt_len
<= bheader_size
) {
1671 pdma_st
->ps_hdr_len
= total_pkt_len
;
1672 pdma_st
->ps_desc_data
.hdr_len
= total_pkt_len
;
1676 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1679 fragment
= net_rx_pkt_get_ip4_info(pkt
)->fragment
;
1680 } else if (hasip6
) {
1681 fragment
= net_rx_pkt_get_ip6_info(pkt
)->fragment
;
1683 pdma_st
->ps_desc_data
.hdr_len
= bheader_size
;
1684 goto header_not_handled
;
1687 if (fragment
&& (core
->mac
[RFCTL
] & E1000_RFCTL_IPFRSP_DIS
)) {
1688 pdma_st
->ps_desc_data
.hdr_len
= bheader_size
;
1689 goto header_not_handled
;
1692 /* no header splitting for SCTP */
1693 if (!fragment
&& (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
||
1694 l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
)) {
1695 pdma_st
->ps_hdr_len
= net_rx_pkt_get_l5_hdr_offset(pkt
);
1697 pdma_st
->ps_hdr_len
= net_rx_pkt_get_l4_hdr_offset(pkt
);
1700 pdma_st
->ps_desc_data
.sph
= true;
1701 pdma_st
->ps_desc_data
.hdr_len
= pdma_st
->ps_hdr_len
;
1703 if (pdma_st
->ps_hdr_len
> bheader_size
) {
1704 pdma_st
->ps_desc_data
.hbo
= true;
1705 goto header_not_handled
;
1712 pdma_st
->ps_hdr_len
= bheader_size
;
1720 igb_truncate_to_descriptor_size(IGBPacketRxDMAState
*pdma_st
, size_t *size
)
1722 if (pdma_st
->do_ps
&& pdma_st
->is_first
) {
1723 if (*size
> pdma_st
->rx_desc_packet_buf_size
+ pdma_st
->ps_hdr_len
) {
1724 *size
= pdma_st
->rx_desc_packet_buf_size
+ pdma_st
->ps_hdr_len
;
1727 if (*size
> pdma_st
->rx_desc_packet_buf_size
) {
1728 *size
= pdma_st
->rx_desc_packet_buf_size
;
1734 igb_write_hdr_frag_to_rx_buffers(IGBCore
*core
,
1736 IGBPacketRxDMAState
*pdma_st
,
1738 dma_addr_t data_len
)
1740 assert(data_len
<= pdma_st
->rx_desc_header_buf_size
-
1741 pdma_st
->bastate
.written
[0]);
1743 pdma_st
->ba
[0] + pdma_st
->bastate
.written
[0],
1745 pdma_st
->bastate
.written
[0] += data_len
;
1746 pdma_st
->bastate
.cur_idx
= 1;
1750 igb_write_header_to_rx_buffers(IGBCore
*core
,
1751 struct NetRxPkt
*pkt
,
1753 IGBPacketRxDMAState
*pdma_st
,
1757 size_t ps_hdr_copied
= 0;
1759 if (!pdma_st
->is_first
) {
1760 /* Leave buffer 0 of each descriptor except first */
1762 pdma_st
->bastate
.cur_idx
= 1;
1767 iov_copy
= MIN(pdma_st
->ps_hdr_len
- ps_hdr_copied
,
1768 pdma_st
->iov
->iov_len
- pdma_st
->iov_ofs
);
1770 igb_write_hdr_frag_to_rx_buffers(core
, d
, pdma_st
,
1771 pdma_st
->iov
->iov_base
,
1774 *copy_size
-= iov_copy
;
1775 ps_hdr_copied
+= iov_copy
;
1777 pdma_st
->iov_ofs
+= iov_copy
;
1778 if (pdma_st
->iov_ofs
== pdma_st
->iov
->iov_len
) {
1780 pdma_st
->iov_ofs
= 0;
1782 } while (ps_hdr_copied
< pdma_st
->ps_hdr_len
);
1784 pdma_st
->is_first
= false;
1788 igb_write_payload_frag_to_rx_buffers(IGBCore
*core
,
1790 IGBPacketRxDMAState
*pdma_st
,
1792 dma_addr_t data_len
)
1794 while (data_len
> 0) {
1795 assert(pdma_st
->bastate
.cur_idx
< IGB_MAX_PS_BUFFERS
);
1797 uint32_t cur_buf_bytes_left
=
1798 pdma_st
->rx_desc_packet_buf_size
-
1799 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
];
1800 uint32_t bytes_to_write
= MIN(data_len
, cur_buf_bytes_left
);
1802 trace_igb_rx_desc_buff_write(
1803 pdma_st
->bastate
.cur_idx
,
1804 pdma_st
->ba
[pdma_st
->bastate
.cur_idx
],
1805 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
],
1810 pdma_st
->ba
[pdma_st
->bastate
.cur_idx
] +
1811 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
],
1812 data
, bytes_to_write
);
1814 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
] += bytes_to_write
;
1815 data
+= bytes_to_write
;
1816 data_len
-= bytes_to_write
;
1818 if (pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
] ==
1819 pdma_st
->rx_desc_packet_buf_size
) {
1820 pdma_st
->bastate
.cur_idx
++;
1826 igb_write_payload_to_rx_buffers(IGBCore
*core
,
1827 struct NetRxPkt
*pkt
,
1829 IGBPacketRxDMAState
*pdma_st
,
1832 static const uint32_t fcs_pad
;
1835 /* Copy packet payload */
1836 while (*copy_size
) {
1837 iov_copy
= MIN(*copy_size
, pdma_st
->iov
->iov_len
- pdma_st
->iov_ofs
);
1838 igb_write_payload_frag_to_rx_buffers(core
, d
,
1840 pdma_st
->iov
->iov_base
+
1844 *copy_size
-= iov_copy
;
1845 pdma_st
->iov_ofs
+= iov_copy
;
1846 if (pdma_st
->iov_ofs
== pdma_st
->iov
->iov_len
) {
1848 pdma_st
->iov_ofs
= 0;
1852 if (pdma_st
->desc_offset
+ pdma_st
->desc_size
>= pdma_st
->total_size
) {
1853 /* Simulate FCS checksum presence in the last descriptor */
1854 igb_write_payload_frag_to_rx_buffers(core
, d
,
1856 (const char *) &fcs_pad
,
1857 e1000x_fcs_len(core
->mac
));
1862 igb_write_to_rx_buffers(IGBCore
*core
,
1863 struct NetRxPkt
*pkt
,
1865 IGBPacketRxDMAState
*pdma_st
)
1869 if (!(pdma_st
->ba
)[1] || (pdma_st
->do_ps
&& !(pdma_st
->ba
[0]))) {
1870 /* as per intel docs; skip descriptors with null buf addr */
1871 trace_e1000e_rx_null_descriptor();
1875 if (pdma_st
->desc_offset
>= pdma_st
->size
) {
1879 pdma_st
->desc_size
= pdma_st
->total_size
- pdma_st
->desc_offset
;
1880 igb_truncate_to_descriptor_size(pdma_st
, &pdma_st
->desc_size
);
1881 copy_size
= pdma_st
->size
- pdma_st
->desc_offset
;
1882 igb_truncate_to_descriptor_size(pdma_st
, ©_size
);
1884 /* For PS mode copy the packet header first */
1885 if (pdma_st
->do_ps
) {
1886 igb_write_header_to_rx_buffers(core
, pkt
, d
, pdma_st
, ©_size
);
1888 pdma_st
->bastate
.cur_idx
= 1;
1891 igb_write_payload_to_rx_buffers(core
, pkt
, d
, pdma_st
, ©_size
);
1895 igb_write_packet_to_guest(IGBCore
*core
, struct NetRxPkt
*pkt
,
1896 const E1000E_RxRing
*rxr
,
1897 const E1000E_RSSInfo
*rss_info
,
1898 uint16_t etqf
, bool ts
)
1902 union e1000_rx_desc_union desc
;
1903 const E1000ERingInfo
*rxi
;
1906 IGBPacketRxDMAState pdma_st
= {0};
1907 pdma_st
.is_first
= true;
1908 pdma_st
.size
= net_rx_pkt_get_total_len(pkt
);
1909 pdma_st
.total_size
= pdma_st
.size
+ e1000x_fcs_len(core
->mac
);
1912 rx_desc_len
= core
->rx_desc_len
;
1913 pdma_st
.rx_desc_packet_buf_size
= igb_rxbufsize(core
, rxi
);
1914 pdma_st
.rx_desc_header_buf_size
= igb_rxhdrbufsize(core
, rxi
);
1915 pdma_st
.iov
= net_rx_pkt_get_iovec(pkt
);
1916 d
= pcie_sriov_get_vf_at_index(core
->owner
, rxi
->idx
% 8);
1921 pdma_st
.do_ps
= igb_do_ps(core
, rxi
, pkt
, &pdma_st
);
1924 memset(&pdma_st
.bastate
, 0, sizeof(IGBBAState
));
1925 bool is_last
= false;
1927 if (igb_ring_empty(core
, rxi
)) {
1931 base
= igb_ring_head_descr(core
, rxi
);
1932 pci_dma_read(d
, base
, &desc
, rx_desc_len
);
1933 trace_e1000e_rx_descr(rxi
->idx
, base
, rx_desc_len
);
1935 igb_read_rx_descr(core
, &desc
, &pdma_st
, rxi
);
1937 igb_write_to_rx_buffers(core
, pkt
, d
, &pdma_st
);
1938 pdma_st
.desc_offset
+= pdma_st
.desc_size
;
1939 if (pdma_st
.desc_offset
>= pdma_st
.total_size
) {
1943 igb_write_rx_descr(core
, &desc
,
1944 is_last
? pkt
: NULL
,
1949 igb_pci_dma_write_rx_desc(core
, d
, base
, &desc
, rx_desc_len
);
1950 igb_ring_advance(core
, rxi
, rx_desc_len
/ E1000_MIN_RX_DESC_LEN
);
1951 } while (pdma_st
.desc_offset
< pdma_st
.total_size
);
1953 igb_update_rx_stats(core
, rxi
, pdma_st
.size
, pdma_st
.total_size
);
1957 igb_rx_strip_vlan(IGBCore
*core
, const E1000ERingInfo
*rxi
)
1959 if (core
->mac
[MRQC
] & 1) {
1960 uint16_t pool
= rxi
->idx
% IGB_NUM_VM_POOLS
;
1961 /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
1962 return (net_rx_pkt_get_packet_type(core
->rx_pkt
) == ETH_PKT_MCAST
) ?
1963 core
->mac
[RPLOLR
] & E1000_RPLOLR_STRVLAN
:
1964 core
->mac
[VMOLR0
+ pool
] & E1000_VMOLR_STRVLAN
;
1967 return e1000x_vlan_enabled(core
->mac
);
1971 igb_rx_fix_l4_csum(IGBCore
*core
, struct NetRxPkt
*pkt
)
1973 struct virtio_net_hdr
*vhdr
= net_rx_pkt_get_vhdr(pkt
);
1975 if (vhdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) {
1976 net_rx_pkt_fix_l4_csum(pkt
);
1981 igb_receive_iov(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
)
1983 return igb_receive_internal(core
, iov
, iovcnt
, core
->has_vnet
, NULL
);
1987 igb_receive_internal(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
,
1988 bool has_vnet
, bool *external_tx
)
1990 uint16_t queues
= 0;
1991 uint32_t causes
= 0;
1992 uint32_t ecauses
= 0;
1995 uint8_t octets
[ETH_ZLEN
];
1997 struct iovec min_iov
;
1998 size_t size
, orig_size
;
2001 E1000E_RSSInfo rss_info
;
2005 int strip_vlan_index
;
2008 trace_e1000e_rx_receive_iov(iovcnt
);
2011 *external_tx
= true;
2014 if (!e1000x_hw_rx_enabled(core
->mac
)) {
2018 /* Pull virtio header in */
2020 net_rx_pkt_set_vhdr_iovec(core
->rx_pkt
, iov
, iovcnt
);
2021 iov_ofs
= sizeof(struct virtio_net_hdr
);
2023 net_rx_pkt_unset_vhdr(core
->rx_pkt
);
2026 orig_size
= iov_size(iov
, iovcnt
);
2027 size
= orig_size
- iov_ofs
;
2029 /* Pad to minimum Ethernet frame length */
2030 if (size
< sizeof(buf
)) {
2031 iov_to_buf(iov
, iovcnt
, iov_ofs
, &buf
, size
);
2032 memset(&buf
.octets
[size
], 0, sizeof(buf
) - size
);
2033 e1000x_inc_reg_if_not_full(core
->mac
, RUC
);
2034 min_iov
.iov_base
= &buf
;
2035 min_iov
.iov_len
= size
= sizeof(buf
);
2040 iov_to_buf(iov
, iovcnt
, iov_ofs
, &buf
, sizeof(buf
.l2_header
));
2043 net_rx_pkt_set_packet_type(core
->rx_pkt
,
2044 get_eth_packet_type(&buf
.l2_header
.eth
));
2045 net_rx_pkt_set_protocols(core
->rx_pkt
, iov
, iovcnt
, iov_ofs
);
2047 queues
= igb_receive_assign(core
, iov
, iovcnt
, iov_ofs
,
2048 &buf
.l2_header
, size
,
2049 &rss_info
, &etqf
, &ts
, external_tx
);
2051 trace_e1000e_rx_flt_dropped();
2055 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
2056 if (!(queues
& BIT(i
)) ||
2057 !(core
->mac
[RXDCTL0
+ (i
* 16)] & E1000_RXDCTL_QUEUE_ENABLE
)) {
2061 igb_rx_ring_init(core
, &rxr
, i
);
2063 if (!igb_rx_strip_vlan(core
, rxr
.i
)) {
2064 strip_vlan_index
= -1;
2065 } else if (core
->mac
[CTRL_EXT
] & BIT(26)) {
2066 strip_vlan_index
= 1;
2068 strip_vlan_index
= 0;
2071 net_rx_pkt_attach_iovec_ex(core
->rx_pkt
, iov
, iovcnt
, iov_ofs
,
2073 core
->mac
[VET
] & 0xffff,
2074 core
->mac
[VET
] >> 16);
2076 total_size
= net_rx_pkt_get_total_len(core
->rx_pkt
) +
2077 e1000x_fcs_len(core
->mac
);
2079 if (!igb_has_rxbufs(core
, rxr
.i
, total_size
)) {
2080 causes
|= E1000_ICS_RXO
;
2081 trace_e1000e_rx_not_written_to_guest(rxr
.i
->idx
);
2085 causes
|= E1000_ICR_RXDW
;
2087 igb_rx_fix_l4_csum(core
, core
->rx_pkt
);
2088 igb_write_packet_to_guest(core
, core
->rx_pkt
, &rxr
, &rss_info
, etqf
, ts
);
2090 /* Check if receive descriptor minimum threshold hit */
2091 if (igb_rx_descr_threshold_hit(core
, rxr
.i
)) {
2092 causes
|= E1000_ICS_RXDMT0
;
2095 ecauses
|= igb_rx_wb_eic(core
, rxr
.i
->idx
);
2097 trace_e1000e_rx_written_to_guest(rxr
.i
->idx
);
2100 trace_e1000e_rx_interrupt_set(causes
);
2101 igb_raise_interrupts(core
, EICR
, ecauses
);
2102 igb_raise_interrupts(core
, ICR
, causes
);
2108 igb_have_autoneg(IGBCore
*core
)
2110 return core
->phy
[MII_BMCR
] & MII_BMCR_AUTOEN
;
2113 static void igb_update_flowctl_status(IGBCore
*core
)
2115 if (igb_have_autoneg(core
) && core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
) {
2116 trace_e1000e_link_autoneg_flowctl(true);
2117 core
->mac
[CTRL
] |= E1000_CTRL_TFCE
| E1000_CTRL_RFCE
;
2119 trace_e1000e_link_autoneg_flowctl(false);
2124 igb_link_down(IGBCore
*core
)
2126 e1000x_update_regs_on_link_down(core
->mac
, core
->phy
);
2127 igb_update_flowctl_status(core
);
2131 igb_set_phy_ctrl(IGBCore
*core
, uint16_t val
)
2133 /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
2134 core
->phy
[MII_BMCR
] = val
& ~(0x3f | MII_BMCR_RESET
| MII_BMCR_ANRESTART
);
2136 if ((val
& MII_BMCR_ANRESTART
) && igb_have_autoneg(core
)) {
2137 e1000x_restart_autoneg(core
->mac
, core
->phy
, core
->autoneg_timer
);
2141 void igb_core_set_link_status(IGBCore
*core
)
2143 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
2144 uint32_t old_status
= core
->mac
[STATUS
];
2146 trace_e1000e_link_status_changed(nc
->link_down
? false : true);
2148 if (nc
->link_down
) {
2149 e1000x_update_regs_on_link_down(core
->mac
, core
->phy
);
2151 if (igb_have_autoneg(core
) &&
2152 !(core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
)) {
2153 e1000x_restart_autoneg(core
->mac
, core
->phy
,
2154 core
->autoneg_timer
);
2156 e1000x_update_regs_on_link_up(core
->mac
, core
->phy
);
2157 igb_start_recv(core
);
2161 if (core
->mac
[STATUS
] != old_status
) {
2162 igb_raise_interrupts(core
, ICR
, E1000_ICR_LSC
);
2167 igb_set_ctrl(IGBCore
*core
, int index
, uint32_t val
)
2169 trace_e1000e_core_ctrl_write(index
, val
);
2171 /* RST is self clearing */
2172 core
->mac
[CTRL
] = val
& ~E1000_CTRL_RST
;
2173 core
->mac
[CTRL_DUP
] = core
->mac
[CTRL
];
2175 trace_e1000e_link_set_params(
2176 !!(val
& E1000_CTRL_ASDE
),
2177 (val
& E1000_CTRL_SPD_SEL
) >> E1000_CTRL_SPD_SHIFT
,
2178 !!(val
& E1000_CTRL_FRCSPD
),
2179 !!(val
& E1000_CTRL_FRCDPX
),
2180 !!(val
& E1000_CTRL_RFCE
),
2181 !!(val
& E1000_CTRL_TFCE
));
2183 if (val
& E1000_CTRL_RST
) {
2184 trace_e1000e_core_ctrl_sw_reset();
2185 igb_reset(core
, true);
2188 if (val
& E1000_CTRL_PHY_RST
) {
2189 trace_e1000e_core_ctrl_phy_reset();
2190 core
->mac
[STATUS
] |= E1000_STATUS_PHYRA
;
2195 igb_set_rfctl(IGBCore
*core
, int index
, uint32_t val
)
2197 trace_e1000e_rx_set_rfctl(val
);
2199 if (!(val
& E1000_RFCTL_ISCSI_DIS
)) {
2200 trace_e1000e_wrn_iscsi_filtering_not_supported();
2203 if (!(val
& E1000_RFCTL_NFSW_DIS
)) {
2204 trace_e1000e_wrn_nfsw_filtering_not_supported();
2207 if (!(val
& E1000_RFCTL_NFSR_DIS
)) {
2208 trace_e1000e_wrn_nfsr_filtering_not_supported();
2211 core
->mac
[RFCTL
] = val
;
2215 igb_calc_rxdesclen(IGBCore
*core
)
2217 if (igb_rx_use_legacy_descriptor(core
)) {
2218 core
->rx_desc_len
= sizeof(struct e1000_rx_desc
);
2220 core
->rx_desc_len
= sizeof(union e1000_adv_rx_desc
);
2222 trace_e1000e_rx_desc_len(core
->rx_desc_len
);
2226 igb_set_rx_control(IGBCore
*core
, int index
, uint32_t val
)
2228 core
->mac
[RCTL
] = val
;
2229 trace_e1000e_rx_set_rctl(core
->mac
[RCTL
]);
2231 if (val
& E1000_RCTL_DTYP_MASK
) {
2232 qemu_log_mask(LOG_GUEST_ERROR
,
2233 "igb: RCTL.DTYP must be zero for compatibility");
2236 if (val
& E1000_RCTL_EN
) {
2237 igb_calc_rxdesclen(core
);
2238 igb_start_recv(core
);
2243 igb_postpone_interrupt(IGBIntrDelayTimer
*timer
)
2245 if (timer
->running
) {
2246 trace_e1000e_irq_postponed_by_xitr(timer
->delay_reg
<< 2);
2251 if (timer
->core
->mac
[timer
->delay_reg
] != 0) {
2252 igb_intrmgr_rearm_timer(timer
);
2259 igb_eitr_should_postpone(IGBCore
*core
, int idx
)
2261 return igb_postpone_interrupt(&core
->eitr
[idx
]);
static void igb_send_msix(IGBCore *core, uint32_t causes)
{
    int vector;

    for (vector = 0; vector < IGB_INTR_NUM; ++vector) {
        if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) {
            trace_e1000e_irq_msix_notify_vec(vector);
            igb_msix_notify(core, vector);
        }
    }
}

static void
igb_fix_icr_asserted(IGBCore *core)
{
    core->mac[ICR] &= ~E1000_ICR_ASSERTED;
    if (core->mac[ICR]) {
        core->mac[ICR] |= E1000_ICR_ASSERTED;
    }

    trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
}

static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes)
{
    uint32_t old_causes = core->mac[ICR] & core->mac[IMS];
    uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS];
    uint32_t raised_causes;
    uint32_t raised_ecauses;
    uint32_t int_alloc;

    trace_e1000e_irq_set(index << 2,
                         core->mac[index], core->mac[index] | causes);

    core->mac[index] |= causes;

    if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) {
        raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;

        if (raised_causes & E1000_ICR_DRSTA) {
            int_alloc = core->mac[IVAR_MISC] & 0xff;
            if (int_alloc & E1000_IVAR_VALID) {
                core->mac[EICR] |= BIT(int_alloc & 0x1f);
            }
        }
        /* Check if other bits (excluding the TCP Timer) are enabled. */
        if (raised_causes & ~E1000_ICR_DRSTA) {
            int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff;
            if (int_alloc & E1000_IVAR_VALID) {
                core->mac[EICR] |= BIT(int_alloc & 0x1f);
            }
        }

        raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses;
        if (!raised_ecauses) {
            return;
        }

        igb_send_msix(core, raised_ecauses);
    } else {
        igb_fix_icr_asserted(core);

        raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
        if (!raised_causes) {
            return;
        }

        core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER;

        if (msix_enabled(core->owner)) {
            trace_e1000e_irq_msix_notify_vec(0);
            msix_notify(core->owner, 0);
        } else if (msi_enabled(core->owner)) {
            trace_e1000e_irq_msi_notify(raised_causes);
            msi_notify(core->owner, 0);
        } else {
            igb_raise_legacy_irq(core);
        }
    }
}
static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes)
{
    trace_e1000e_irq_clear(index << 2,
                           core->mac[index], core->mac[index] & ~causes);

    core->mac[index] &= ~causes;

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (!(core->mac[ICR] & core->mac[IMS]) &&
        !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) {
        core->mac[EICR] &= ~E1000_EICR_OTHER;

        if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) {
            igb_lower_legacy_irq(core);
        }
    }
}

static void igb_set_eics(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eics(val, msix);
    igb_raise_interrupts(core, EICR, val & mask);
}

static void igb_set_eims(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eims(val, msix);
    igb_raise_interrupts(core, EIMS, val & mask);
}

static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn)
{
    uint32_t ent = core->mac[VTIVAR_MISC + vfn];
    uint32_t causes;

    if ((ent & E1000_IVAR_VALID)) {
        causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM);
        igb_raise_interrupts(core, EICR, causes);
    }
}

static void mailbox_interrupt_to_pf(IGBCore *core)
{
    igb_raise_interrupts(core, ICR, E1000_ICR_VMMB);
}
static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = index - P2VMAILBOX0;

    trace_igb_set_pfmailbox(vfn, val);

    if (val & E1000_P2VMAILBOX_STS) {
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS;
        mailbox_interrupt_to_vf(core, vfn);
    }

    if (val & E1000_P2VMAILBOX_ACK) {
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK;
        mailbox_interrupt_to_vf(core, vfn);
    }

    /* Buffer Taken by PF (can be set only if the VFU is cleared). */
    if (val & E1000_P2VMAILBOX_PFU) {
        if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) {
            core->mac[index] |= E1000_P2VMAILBOX_PFU;
            core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU;
        }
    } else {
        core->mac[index] &= ~E1000_P2VMAILBOX_PFU;
        core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU;
    }

    if (val & E1000_P2VMAILBOX_RVFU) {
        core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU;
        core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) |
                                (E1000_MBVFICR_VFREQ_VF1 << vfn));
    }
}

static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = index - V2PMAILBOX0;

    trace_igb_set_vfmailbox(vfn, val);

    if (val & E1000_V2PMAILBOX_REQ) {
        core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn;
        mailbox_interrupt_to_pf(core);
    }

    if (val & E1000_V2PMAILBOX_ACK) {
        core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn;
        mailbox_interrupt_to_pf(core);
    }

    /* Buffer Taken by VF (can be set only if the PFU is cleared). */
    if (val & E1000_V2PMAILBOX_VFU) {
        if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) {
            core->mac[index] |= E1000_V2PMAILBOX_VFU;
            core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU;
        }
    } else {
        core->mac[index] &= ~E1000_V2PMAILBOX_VFU;
        core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU;
    }
}
void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
{
    uint16_t qn0 = vfn;
    uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;

    trace_igb_core_vf_reset(vfn);

    /* disable Rx and Tx for the VF*/
    core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
    core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
    core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
    core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
    core->mac[VFRE] &= ~BIT(vfn);
    core->mac[VFTE] &= ~BIT(vfn);
    /* indicate VF reset to PF */
    core->mac[VFLRE] |= BIT(vfn);
    /* VFLRE and mailbox use the same interrupt cause */
    mailbox_interrupt_to_pf(core);
}
static void igb_w1c(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] &= ~val;
}

static void igb_set_eimc(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
    uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eimc(val, msix);

    /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
    igb_lower_interrupts(core, EIMS, val & mask);
}

static void igb_set_eiac(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);

    if (msix) {
        trace_igb_irq_write_eiac(val);

        /*
         * TODO: When using IOV, the bits that correspond to MSI-X vectors
         * that are assigned to a VF are read-only.
         */
        core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK);
    }
}
static void igb_set_eiam(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);

    /*
     * TODO: When using IOV, the bits that correspond to MSI-X vectors that
     * are assigned to a VF are read-only.
     */
    core->mac[EIAM] |=
        ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));

    trace_igb_irq_write_eiam(val, msix);
}
static void igb_set_eicr(IGBCore *core, int index, uint32_t val)
{
    bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);

    /*
     * TODO: In IOV mode, only bit zero of this vector is available for the PF
     * function.
     */
    uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;

    trace_igb_irq_write_eicr(val, msix);
    igb_lower_interrupts(core, EICR, val & mask);
}

static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn;

    if (val & E1000_CTRL_RST) {
        vfn = (index - PVTCTRL0) / 0x40;
        igb_core_vf_reset(core, vfn);
    }
}
static void igb_set_vteics(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - PVTEICS0) / 0x40;

    core->mac[index] = val;
    igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}

static void igb_set_vteims(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - PVTEIMS0) / 0x40;

    core->mac[index] = val;
    igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}

static void igb_set_vteimc(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - PVTEIMC0) / 0x40;

    core->mac[index] = val;
    igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}

static void igb_set_vteiac(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - PVTEIAC0) / 0x40;

    core->mac[index] = val;
    igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}

static void igb_set_vteiam(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - PVTEIAM0) / 0x40;

    core->mac[index] = val;
    igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}

static void igb_set_vteicr(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - PVTEICR0) / 0x40;

    core->mac[index] = val;
    igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
}
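/*
 * The PVTEIxx handlers above fold a VF's view of its extended interrupt
 * registers into the PF-global ones: the low three bits written by the VF
 * are shifted into the EICR bit range reserved for that VF,
 * (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM).  Assuming the usual three
 * MSI-X vectors per VF, VF0's vectors map to bits 22..24, VF1's to 19..21,
 * and so on; igb_msix_notify() applies the inverse mapping when it picks the
 * VF and vector to signal.
 */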
static void igb_set_vtivar(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - VTIVAR);
    uint16_t qn = vfn;
    uint8_t ent;
    int n;

    core->mac[index] = val;

    /* Get assigned vector associated with queue Rx#0. */
    if ((val & E1000_IVAR_VALID)) {
        n = igb_ivar_entry_rx(qn);
        ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7)));
        core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
    }

    /* Get assigned vector associated with queue Tx#0 */
    ent = val >> 8;
    if ((ent & E1000_IVAR_VALID)) {
        n = igb_ivar_entry_tx(qn);
        ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7)));
        core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
    }

    /*
     * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
     */
}
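/*
 * The arithmetic above rewrites a VF-relative IVAR entry as a PF-global one:
 * VF-local MSI-X vector n of VF vfn becomes PF cause
 * 24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - n), i.e. 22 + n - 3 * vfn for three
 * vectors per VF, which matches the EICR bit layout used by the PVTEIxx
 * handlers above.
 */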
static void
igb_autoneg_timer(void *opaque)
{
    IGBCore *core = opaque;
    if (!qemu_get_queue(core->owner_nic)->link_down) {
        e1000x_update_regs_on_autoneg_done(core->mac, core->phy);
        igb_start_recv(core);

        igb_update_flowctl_status(core);
        /* signal link status change to the guest */
        igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
    }
}

static inline uint16_t
igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
    uint16_t index = (addr & 0x1ffff) >> 2;
    return index + (mac_reg_access[index] & 0xfffe);
}
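/*
 * mac_reg_access[] (defined further below) packs two things into one
 * uint16_t per register: an even alias displacement, expressed in 32-bit
 * register slots, that redirects an alias index to its primary register,
 * and the MAC_ACCESS_PARTIAL flag in the lowest bit, which is masked off
 * here with 0xfffe before the displacement is applied.
 */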
static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = {
    [MII_BMCR]                   = PHY_RW,
    [MII_BMSR]                   = PHY_R,
    [MII_PHYID1]                 = PHY_R,
    [MII_PHYID2]                 = PHY_R,
    [MII_ANAR]                   = PHY_RW,
    [MII_ANLPAR]                 = PHY_R,
    [MII_ANER]                   = PHY_R,
    [MII_ANNP]                   = PHY_RW,
    [MII_ANLPRNP]                = PHY_R,
    [MII_CTRL1000]               = PHY_RW,
    [MII_STAT1000]               = PHY_R,
    [MII_EXTSTAT]                = PHY_R,
    [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW,
    [IGP01E1000_PHY_PORT_STATUS] = PHY_R,
    [IGP01E1000_PHY_PORT_CTRL]   = PHY_RW,
    [IGP01E1000_PHY_LINK_HEALTH] = PHY_R,
    [IGP02E1000_PHY_POWER_MGMT]  = PHY_RW,
    [IGP01E1000_PHY_PAGE_SELECT] = PHY_W
};

static void
igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data)
{
    assert(addr <= MAX_PHY_REG_ADDRESS);

    if (addr == MII_BMCR) {
        igb_set_phy_ctrl(core, data);
    } else {
        core->phy[addr] = data;
    }
}
static void
igb_set_mdic(IGBCore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!(igb_phy_regcap[addr] & PHY_R)) {
            trace_igb_core_mdic_read_unhandled(addr);
            val |= E1000_MDIC_ERROR;
        } else {
            val = (val ^ data) | core->phy[addr];
            trace_igb_core_mdic_read(addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!(igb_phy_regcap[addr] & PHY_W)) {
            trace_igb_core_mdic_write_unhandled(addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_igb_core_mdic_write(addr, data);
            igb_phy_reg_write(core, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        igb_raise_interrupts(core, ICR, E1000_ICR_MDAC);
    }
}
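/*
 * MDIC emulation notes: the guest encodes the PHY register address, an
 * opcode and (for writes) the data in a single MDIC write.  Reads replace
 * the data field with the current PHY register contents, unsupported
 * registers set E1000_MDIC_ERROR, completion is signalled by
 * E1000_MDIC_READY, and an MDAC interrupt is raised when the guest asked
 * for one via E1000_MDIC_INT_EN.
 */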
static void
igb_set_rdt(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
    trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val);
    igb_start_recv(core);
}

static void
igb_set_status(IGBCore *core, int index, uint32_t val)
{
    if ((val & E1000_STATUS_PHYRA) == 0) {
        core->mac[index] &= ~E1000_STATUS_PHYRA;
    }
}

static void
igb_set_ctrlext(IGBCore *core, int index, uint32_t val)
{
    trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
                                  !!(val & E1000_CTRL_EXT_SPD_BYPS),
                                  !!(val & E1000_CTRL_EXT_PFRSTD));

    /* Zero self-clearing bits */
    val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
    core->mac[CTRL_EXT] = val;

    if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) {
        for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
            core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI;
            core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD;
        }
    }
}

static void
igb_set_pbaclr(IGBCore *core, int index, uint32_t val)
{
    int i;

    core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;

    if (!msix_enabled(core->owner)) {
        return;
    }

    for (i = 0; i < IGB_INTR_NUM; i++) {
        if (core->mac[PBACLR] & BIT(i)) {
            msix_clr_pending(core->owner, i);
        }
    }
}

static void
igb_set_fcrth(IGBCore *core, int index, uint32_t val)
{
    core->mac[FCRTH] = val & 0xFFF8;
}

static void
igb_set_fcrtl(IGBCore *core, int index, uint32_t val)
{
    core->mac[FCRTL] = val & 0x8000FFF8;
}

#define IGB_LOW_BITS_SET_FUNC(num)                                \
    static void                                                   \
    igb_set_##num##bit(IGBCore *core, int index, uint32_t val)    \
    {                                                             \
        core->mac[index] = val & (BIT(num) - 1);                  \
    }

IGB_LOW_BITS_SET_FUNC(4)
IGB_LOW_BITS_SET_FUNC(13)
IGB_LOW_BITS_SET_FUNC(16)
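/*
 * Each expansion of the macro above defines a trivial setter that keeps only
 * the register's low bits; IGB_LOW_BITS_SET_FUNC(13), for example, produces
 * igb_set_13bit(), which stores val & 0x1fff (BIT(13) - 1).
 */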
static void
igb_set_dlen(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff0;
}

static void
igb_set_dbal(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDBAL_MASK;
}

static void
igb_set_tdt(IGBCore *core, int index, uint32_t val)
{
    IGB_TxRing txr;
    int qn = igb_mq_queue_idx(TDT0, index);

    core->mac[index] = val & 0xffff;

    igb_tx_ring_init(core, &txr, qn);
    igb_start_xmit(core, &txr);
}

static void
igb_set_ics(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    igb_raise_interrupts(core, ICR, val);
}

static void
igb_set_imc(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_irq_ims_clear_set_imc(val);
    igb_lower_interrupts(core, IMS, val);
}

static void
igb_set_ims(IGBCore *core, int index, uint32_t val)
{
    igb_raise_interrupts(core, IMS, val & 0x77D4FBFD);
}

static void igb_nsicr(IGBCore *core)
{
    /*
     * If GPIE.NSICR = 0, then the clear of IMS will occur only if at
     * least one bit is set in the IMS and there is a true interrupt as
     * reflected in ICR.INTA.
     */
    if ((core->mac[GPIE] & E1000_GPIE_NSICR) ||
        (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) {
        igb_lower_interrupts(core, IMS, core->mac[IAM]);
    }
}
static void igb_set_icr(IGBCore *core, int index, uint32_t val)
{
    igb_nsicr(core);
    igb_lower_interrupts(core, ICR, val);
}

static uint32_t
igb_mac_readreg(IGBCore *core, int index)
{
    return core->mac[index];
}

static uint32_t
igb_mac_ics_read(IGBCore *core, int index)
{
    trace_e1000e_irq_read_ics(core->mac[ICS]);
    return core->mac[ICS];
}

static uint32_t
igb_mac_ims_read(IGBCore *core, int index)
{
    trace_e1000e_irq_read_ims(core->mac[IMS]);
    return core->mac[IMS];
}

static uint32_t
igb_mac_swsm_read(IGBCore *core, int index)
{
    uint32_t val = core->mac[SWSM];
    core->mac[SWSM] = val | E1000_SWSM_SMBI;
    return val;
}
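/*
 * igb_mac_swsm_read() above gives the guest a simple software semaphore:
 * the first read returns SWSM with SMBI still clear (semaphore acquired),
 * and every subsequent read sees SMBI set, which is how the hardware reports
 * that the semaphore is already taken.
 */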
static uint32_t
igb_mac_eitr_read(IGBCore *core, int index)
{
    return core->eitr_guest_value[index - EITR0];
}
static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index)
{
    uint32_t val = core->mac[index];

    core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK |
                          E1000_V2PMAILBOX_RSTD);

    return val;
}

static uint32_t
igb_mac_icr_read(IGBCore *core, int index)
{
    uint32_t ret = core->mac[ICR];

    if (core->mac[GPIE] & E1000_GPIE_NSICR) {
        trace_igb_irq_icr_clear_gpie_nsicr();
        igb_lower_interrupts(core, ICR, 0xffffffff);
    } else if (core->mac[IMS] == 0) {
        trace_e1000e_irq_icr_clear_zero_ims();
        igb_lower_interrupts(core, ICR, 0xffffffff);
    } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) {
        igb_lower_interrupts(core, ICR, 0xffffffff);
    } else if (!msix_enabled(core->owner)) {
        trace_e1000e_irq_icr_clear_nonmsix_icr_read();
        igb_lower_interrupts(core, ICR, 0xffffffff);
    }

    return ret;
}

static uint32_t
igb_mac_read_clr4(IGBCore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    return ret;
}

static uint32_t
igb_mac_read_clr8(IGBCore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    core->mac[index - 1] = 0;
    return ret;
}
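/*
 * The statistics helpers above implement the device's clear-on-read
 * counters: igb_mac_read_clr4() zeroes a single 32-bit counter after
 * returning it, while igb_mac_read_clr8() is used for the high half of a
 * 64-bit counter pair and clears both the high register and the low
 * register immediately below it (index - 1).
 */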
static uint32_t
igb_get_ctrl(IGBCore *core, int index)
{
    uint32_t val = core->mac[CTRL];

    trace_e1000e_link_read_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    return val;
}

static uint32_t igb_get_status(IGBCore *core, int index)
{
    uint32_t res = core->mac[STATUS];
    uint16_t num_vfs = pcie_sriov_num_vfs(core->owner);

    if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
        res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
    } else {
        res |= E1000_STATUS_FD;
    }

    if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
        switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
        case E1000_CTRL_SPD_10:
            res |= E1000_STATUS_SPEED_10;
            break;
        case E1000_CTRL_SPD_100:
            res |= E1000_STATUS_SPEED_100;
            break;
        case E1000_CTRL_SPD_1000:
        default:
            res |= E1000_STATUS_SPEED_1000;
            break;
        }
    } else {
        res |= E1000_STATUS_SPEED_1000;
    }

    if (num_vfs) {
        res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT;
        res |= E1000_STATUS_IOV_MODE;
    }

    if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
        res |= E1000_STATUS_GIO_MASTER_ENABLE;
    }

    return res;
}

static void
igb_mac_writereg(IGBCore *core, int index, uint32_t val)
{
    core->mac[index] = val;
}

static void
igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val)
{
    uint32_t macaddr[2];

    core->mac[index] = val;

    macaddr[0] = cpu_to_le32(core->mac[RA]);
    macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
    qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
                             (uint8_t *) macaddr);

    trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}

static void
igb_set_eecd(IGBCore *core, int index, uint32_t val)
{
    static const uint32_t ro_bits = E1000_EECD_PRES          |
                                    E1000_EECD_AUTO_RD       |
                                    E1000_EECD_SIZE_EX_MASK;

    core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}

static void
igb_set_eerd(IGBCore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t flags = 0;
    uint32_t data = 0;

    if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        data = core->eeprom[addr];
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}
static void
igb_set_eitr(IGBCore *core, int index, uint32_t val)
{
    uint32_t eitr_num = index - EITR0;

    trace_igb_irq_eitr_set(eitr_num, val);

    core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR;
    core->mac[index] = val & 0x7FFE;
}

static void
igb_update_rx_offloads(IGBCore *core)
{
    int cso_state = igb_rx_l4_cso_enabled(core);

    trace_e1000e_rx_set_cso(cso_state);

    if (core->has_vnet) {
        qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
                         cso_state, 0, 0, 0, 0, 0, 0);
    }
}

static void
igb_set_rxcsum(IGBCore *core, int index, uint32_t val)
{
    core->mac[RXCSUM] = val;
    igb_update_rx_offloads(core);
}

static void
igb_set_gcr(IGBCore *core, int index, uint32_t val)
{
    uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
    core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
}

static uint32_t igb_get_systiml(IGBCore *core, int index)
{
    e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
    return core->mac[SYSTIML];
}

static uint32_t igb_get_rxsatrh(IGBCore *core, int index)
{
    core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
    return core->mac[RXSATRH];
}

static uint32_t igb_get_txstmph(IGBCore *core, int index)
{
    core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
    return core->mac[TXSTMPH];
}

static void igb_set_timinca(IGBCore *core, int index, uint32_t val)
{
    e1000x_set_timinca(core->mac, &core->timadj, val);
}

static void igb_set_timadjh(IGBCore *core, int index, uint32_t val)
{
    core->mac[TIMADJH] = val;
    core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
}
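/*
 * Writing TIMADJH above commits a 64-bit adjustment assembled from TIMADJL
 * (low 32 bits, expected to be written first by the guest) and TIMADJH
 * (high 32 bits) into core->timadj, which igb_get_systiml() then folds into
 * the SYSTIM timestamp via e1000x_timestamp().
 */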
#define igb_getreg(x)    [x] = igb_mac_readreg
typedef uint32_t (*readops)(IGBCore *, int);
static const readops igb_macreg_readops[] = {
    igb_getreg(RDBAH10),    igb_getreg(RDBAH11),    igb_getreg(RDBAH12),
    igb_getreg(RDBAH13),    igb_getreg(RDBAH14),    igb_getreg(RDBAH15),
    igb_getreg(TDBAL10),    igb_getreg(TDBAL11),    igb_getreg(TDBAL12),
    igb_getreg(TDBAL13),    igb_getreg(TDBAL14),    igb_getreg(TDBAL15),
    igb_getreg(RDLEN10),    igb_getreg(RDLEN11),    igb_getreg(RDLEN12),
    igb_getreg(RDLEN13),    igb_getreg(RDLEN14),    igb_getreg(RDLEN15),
    igb_getreg(SRRCTL0),    igb_getreg(SRRCTL1),    igb_getreg(SRRCTL2),
    igb_getreg(SRRCTL3),    igb_getreg(SRRCTL4),    igb_getreg(SRRCTL5),
    igb_getreg(SRRCTL6),    igb_getreg(SRRCTL7),    igb_getreg(SRRCTL8),
    igb_getreg(SRRCTL9),    igb_getreg(SRRCTL10),   igb_getreg(SRRCTL11),
    igb_getreg(SRRCTL12),   igb_getreg(SRRCTL13),   igb_getreg(SRRCTL14),
    igb_getreg(SRRCTL15),
    igb_getreg(LATECOL),
    igb_getreg(RXSTMPH),    igb_getreg(TXSTMPL),    igb_getreg(TIMADJL),
    igb_getreg(FLMNGCTL),   igb_getreg(FLMNGCNT),   igb_getreg(TSYNCTXCTL),
    igb_getreg(EEMNGDATA),  igb_getreg(CTRL_EXT),   igb_getreg(SYSTIMH),
    igb_getreg(EEMNGCTL),   igb_getreg(FLMNGDATA),  igb_getreg(TSYNCRXCTL),
    igb_getreg(TCTL_EXT),   igb_getreg(XOFFTXC),    igb_getreg(TIMINCA),
    igb_getreg(RXSATRL),
    igb_getreg(TDLEN10),    igb_getreg(TDLEN11),    igb_getreg(TDLEN12),
    igb_getreg(TDLEN13),    igb_getreg(TDLEN14),    igb_getreg(TDLEN15),
    igb_getreg(TXDCTL0),    igb_getreg(TXDCTL1),    igb_getreg(TXDCTL2),
    igb_getreg(TXDCTL3),    igb_getreg(TXDCTL4),    igb_getreg(TXDCTL5),
    igb_getreg(TXDCTL6),    igb_getreg(TXDCTL7),    igb_getreg(TXDCTL8),
    igb_getreg(TXDCTL9),    igb_getreg(TXDCTL10),   igb_getreg(TXDCTL11),
    igb_getreg(TXDCTL12),   igb_getreg(TXDCTL13),   igb_getreg(TXDCTL14),
    igb_getreg(TXDCTL15),
    igb_getreg(TXCTL10),    igb_getreg(TXCTL11),    igb_getreg(TXCTL12),
    igb_getreg(TXCTL13),    igb_getreg(TXCTL14),    igb_getreg(TXCTL15),
    igb_getreg(TDWBAL0),    igb_getreg(TDWBAL1),    igb_getreg(TDWBAL2),
    igb_getreg(TDWBAL3),    igb_getreg(TDWBAL4),    igb_getreg(TDWBAL5),
    igb_getreg(TDWBAL6),    igb_getreg(TDWBAL7),    igb_getreg(TDWBAL8),
    igb_getreg(TDWBAL9),    igb_getreg(TDWBAL10),   igb_getreg(TDWBAL11),
    igb_getreg(TDWBAL12),   igb_getreg(TDWBAL13),   igb_getreg(TDWBAL14),
    igb_getreg(TDWBAL15),
    igb_getreg(TDWBAH0),    igb_getreg(TDWBAH1),    igb_getreg(TDWBAH2),
    igb_getreg(TDWBAH3),    igb_getreg(TDWBAH4),    igb_getreg(TDWBAH5),
    igb_getreg(TDWBAH6),    igb_getreg(TDWBAH7),    igb_getreg(TDWBAH8),
    igb_getreg(TDWBAH9),    igb_getreg(TDWBAH10),   igb_getreg(TDWBAH11),
    igb_getreg(TDWBAH12),   igb_getreg(TDWBAH13),   igb_getreg(TDWBAH14),
    igb_getreg(TDWBAH15),
    igb_getreg(PVTCTRL0),   igb_getreg(PVTCTRL1),   igb_getreg(PVTCTRL2),
    igb_getreg(PVTCTRL3),   igb_getreg(PVTCTRL4),   igb_getreg(PVTCTRL5),
    igb_getreg(PVTCTRL6),   igb_getreg(PVTCTRL7),
    igb_getreg(PVTEIMS0),   igb_getreg(PVTEIMS1),   igb_getreg(PVTEIMS2),
    igb_getreg(PVTEIMS3),   igb_getreg(PVTEIMS4),   igb_getreg(PVTEIMS5),
    igb_getreg(PVTEIMS6),   igb_getreg(PVTEIMS7),
    igb_getreg(PVTEIAC0),   igb_getreg(PVTEIAC1),   igb_getreg(PVTEIAC2),
    igb_getreg(PVTEIAC3),   igb_getreg(PVTEIAC4),   igb_getreg(PVTEIAC5),
    igb_getreg(PVTEIAC6),   igb_getreg(PVTEIAC7),
    igb_getreg(PVTEIAM0),   igb_getreg(PVTEIAM1),   igb_getreg(PVTEIAM2),
    igb_getreg(PVTEIAM3),   igb_getreg(PVTEIAM4),   igb_getreg(PVTEIAM5),
    igb_getreg(PVTEIAM6),   igb_getreg(PVTEIAM7),
    igb_getreg(PVFGPRC0),   igb_getreg(PVFGPRC1),   igb_getreg(PVFGPRC2),
    igb_getreg(PVFGPRC3),   igb_getreg(PVFGPRC4),   igb_getreg(PVFGPRC5),
    igb_getreg(PVFGPRC6),   igb_getreg(PVFGPRC7),
    igb_getreg(PVFGPTC0),   igb_getreg(PVFGPTC1),   igb_getreg(PVFGPTC2),
    igb_getreg(PVFGPTC3),   igb_getreg(PVFGPTC4),   igb_getreg(PVFGPTC5),
    igb_getreg(PVFGPTC6),   igb_getreg(PVFGPTC7),
    igb_getreg(PVFGORC0),   igb_getreg(PVFGORC1),   igb_getreg(PVFGORC2),
    igb_getreg(PVFGORC3),   igb_getreg(PVFGORC4),   igb_getreg(PVFGORC5),
    igb_getreg(PVFGORC6),   igb_getreg(PVFGORC7),
    igb_getreg(PVFGOTC0),   igb_getreg(PVFGOTC1),   igb_getreg(PVFGOTC2),
    igb_getreg(PVFGOTC3),   igb_getreg(PVFGOTC4),   igb_getreg(PVFGOTC5),
    igb_getreg(PVFGOTC6),   igb_getreg(PVFGOTC7),
    igb_getreg(PVFMPRC0),   igb_getreg(PVFMPRC1),   igb_getreg(PVFMPRC2),
    igb_getreg(PVFMPRC3),   igb_getreg(PVFMPRC4),   igb_getreg(PVFMPRC5),
    igb_getreg(PVFMPRC6),   igb_getreg(PVFMPRC7),
    igb_getreg(PVFGPRLBC0), igb_getreg(PVFGPRLBC1), igb_getreg(PVFGPRLBC2),
    igb_getreg(PVFGPRLBC3), igb_getreg(PVFGPRLBC4), igb_getreg(PVFGPRLBC5),
    igb_getreg(PVFGPRLBC6), igb_getreg(PVFGPRLBC7),
    igb_getreg(PVFGPTLBC0), igb_getreg(PVFGPTLBC1), igb_getreg(PVFGPTLBC2),
    igb_getreg(PVFGPTLBC3), igb_getreg(PVFGPTLBC4), igb_getreg(PVFGPTLBC5),
    igb_getreg(PVFGPTLBC6), igb_getreg(PVFGPTLBC7),
    igb_getreg(PVFGORLBC0), igb_getreg(PVFGORLBC1), igb_getreg(PVFGORLBC2),
    igb_getreg(PVFGORLBC3), igb_getreg(PVFGORLBC4), igb_getreg(PVFGORLBC5),
    igb_getreg(PVFGORLBC6), igb_getreg(PVFGORLBC7),
    igb_getreg(PVFGOTLBC0), igb_getreg(PVFGOTLBC1), igb_getreg(PVFGOTLBC2),
    igb_getreg(PVFGOTLBC3), igb_getreg(PVFGOTLBC4), igb_getreg(PVFGOTLBC5),
    igb_getreg(PVFGOTLBC6), igb_getreg(PVFGOTLBC7),
    igb_getreg(RDBAL10),    igb_getreg(RDBAL11),    igb_getreg(RDBAL12),
    igb_getreg(RDBAL13),    igb_getreg(RDBAL14),    igb_getreg(RDBAL15),
    igb_getreg(TDBAH10),    igb_getreg(TDBAH11),    igb_getreg(TDBAH12),
    igb_getreg(TDBAH13),    igb_getreg(TDBAH14),    igb_getreg(TDBAH15),
    igb_getreg(XOFFRXC),
    igb_getreg(FUNCTAG),
    igb_getreg(RXDCTL0),    igb_getreg(RXDCTL1),    igb_getreg(RXDCTL2),
    igb_getreg(RXDCTL3),    igb_getreg(RXDCTL4),    igb_getreg(RXDCTL5),
    igb_getreg(RXDCTL6),    igb_getreg(RXDCTL7),    igb_getreg(RXDCTL8),
    igb_getreg(RXDCTL9),    igb_getreg(RXDCTL10),   igb_getreg(RXDCTL11),
    igb_getreg(RXDCTL12),   igb_getreg(RXDCTL13),   igb_getreg(RXDCTL14),
    igb_getreg(RXDCTL15),
    igb_getreg(RXSTMPL),
    igb_getreg(TIMADJH),

    [TOTH]    = igb_mac_read_clr8,  [GOTCH]   = igb_mac_read_clr8,
    [PRC64]   = igb_mac_read_clr4,  [PRC255]  = igb_mac_read_clr4,
    [PRC1023] = igb_mac_read_clr4,  [PTC64]   = igb_mac_read_clr4,
    [PTC255]  = igb_mac_read_clr4,  [PTC1023] = igb_mac_read_clr4,
    [GPRC]    = igb_mac_read_clr4,  [TPT]     = igb_mac_read_clr4,
    [RUC]     = igb_mac_read_clr4,  [BPRC]    = igb_mac_read_clr4,
    [MPTC]    = igb_mac_read_clr4,  [IAC]     = igb_mac_read_clr4,
    [ICR]     = igb_mac_icr_read,
    [STATUS]  = igb_get_status,
    [ICS]     = igb_mac_ics_read,
    /*
     * 8.8.10: Reading the IMC register returns the value of the IMS register.
     */
    [IMC]     = igb_mac_ims_read,
    [TORH]    = igb_mac_read_clr8,  [GORCH]   = igb_mac_read_clr8,
    [PRC127]  = igb_mac_read_clr4,  [PRC511]  = igb_mac_read_clr4,
    [PRC1522] = igb_mac_read_clr4,  [PTC127]  = igb_mac_read_clr4,
    [PTC511]  = igb_mac_read_clr4,  [PTC1522] = igb_mac_read_clr4,
    [GPTC]    = igb_mac_read_clr4,  [TPR]     = igb_mac_read_clr4,
    [ROC]     = igb_mac_read_clr4,  [MPRC]    = igb_mac_read_clr4,
    [BPTC]    = igb_mac_read_clr4,  [TSCTC]   = igb_mac_read_clr4,
    [CTRL]    = igb_get_ctrl,
    [SWSM]    = igb_mac_swsm_read,
    [IMS]     = igb_mac_ims_read,
    [SYSTIML] = igb_get_systiml,
    [RXSATRH] = igb_get_rxsatrh,
    [TXSTMPH] = igb_get_txstmph,

    [CRCERRS ... MPC]     = igb_mac_readreg,
    [IP6AT ... IP6AT + 3] = igb_mac_readreg,
    [IP4AT ... IP4AT + 6] = igb_mac_readreg,
    [RA ... RA + 31]      = igb_mac_readreg,
    [RA2 ... RA2 + 31]    = igb_mac_readreg,
    [WUPM ... WUPM + 31]  = igb_mac_readreg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1]            = igb_mac_readreg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_readreg,
    [FFMT ... FFMT + 254] = igb_mac_readreg,
    [MDEF ... MDEF + 7]   = igb_mac_readreg,
    [FTFT ... FTFT + 254] = igb_mac_readreg,
    [RETA ... RETA + 31]  = igb_mac_readreg,
    [RSSRK ... RSSRK + 9] = igb_mac_readreg,
    [MAVTV0 ... MAVTV3]   = igb_mac_readreg,
    [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read,
    [PVTEICR0] = igb_mac_read_clr4, [PVTEICR1] = igb_mac_read_clr4,
    [PVTEICR2] = igb_mac_read_clr4, [PVTEICR3] = igb_mac_read_clr4,
    [PVTEICR4] = igb_mac_read_clr4, [PVTEICR5] = igb_mac_read_clr4,
    [PVTEICR6] = igb_mac_read_clr4, [PVTEICR7] = igb_mac_read_clr4,
    [FWSM]       = igb_mac_readreg,
    [SW_FW_SYNC] = igb_mac_readreg,
    [HTCBDPC]    = igb_mac_read_clr4,
    [EICR]       = igb_mac_read_clr4,
    [EIMS]       = igb_mac_readreg,
    [EIAM]       = igb_mac_readreg,
    [IVAR0 ... IVAR0 + 7] = igb_mac_readreg,
    igb_getreg(IVAR_MISC),
    igb_getreg(TSYNCRXCFG),
    [ETQF0 ... ETQF0 + 7] = igb_mac_readreg,
    [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg,
    [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read,
    igb_getreg(MBVFICR),
    [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_readreg,
    igb_getreg(MBVFIMR),
    [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg,
    [VMVIR0 ... VMVIR7] = igb_mac_readreg,
    [VMOLR0 ... VMOLR7] = igb_mac_readreg,
    [WVBR]     = igb_mac_read_clr4,
    [RQDPC0]  = igb_mac_read_clr4, [RQDPC1]  = igb_mac_read_clr4,
    [RQDPC2]  = igb_mac_read_clr4, [RQDPC3]  = igb_mac_read_clr4,
    [RQDPC4]  = igb_mac_read_clr4, [RQDPC5]  = igb_mac_read_clr4,
    [RQDPC6]  = igb_mac_read_clr4, [RQDPC7]  = igb_mac_read_clr4,
    [RQDPC8]  = igb_mac_read_clr4, [RQDPC9]  = igb_mac_read_clr4,
    [RQDPC10] = igb_mac_read_clr4, [RQDPC11] = igb_mac_read_clr4,
    [RQDPC12] = igb_mac_read_clr4, [RQDPC13] = igb_mac_read_clr4,
    [RQDPC14] = igb_mac_read_clr4, [RQDPC15] = igb_mac_read_clr4,
    [VTIVAR ... VTIVAR + 7]           = igb_mac_readreg,
    [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg
};
enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) };
#define igb_putreg(x)    [x] = igb_mac_writereg
typedef void (*writeops)(IGBCore *, int, uint32_t);
static const writeops igb_macreg_writeops[] = {
    igb_putreg(RDBAH10),    igb_putreg(RDBAH11),    igb_putreg(RDBAH12),
    igb_putreg(RDBAH13),    igb_putreg(RDBAH14),    igb_putreg(RDBAH15),
    igb_putreg(SRRCTL0),    igb_putreg(SRRCTL1),    igb_putreg(SRRCTL2),
    igb_putreg(SRRCTL3),    igb_putreg(SRRCTL4),    igb_putreg(SRRCTL5),
    igb_putreg(SRRCTL6),    igb_putreg(SRRCTL7),    igb_putreg(SRRCTL8),
    igb_putreg(SRRCTL9),    igb_putreg(SRRCTL10),   igb_putreg(SRRCTL11),
    igb_putreg(SRRCTL12),   igb_putreg(SRRCTL13),   igb_putreg(SRRCTL14),
    igb_putreg(SRRCTL15),
    igb_putreg(RXDCTL0),    igb_putreg(RXDCTL1),    igb_putreg(RXDCTL2),
    igb_putreg(RXDCTL3),    igb_putreg(RXDCTL4),    igb_putreg(RXDCTL5),
    igb_putreg(RXDCTL6),    igb_putreg(RXDCTL7),    igb_putreg(RXDCTL8),
    igb_putreg(RXDCTL9),    igb_putreg(RXDCTL10),   igb_putreg(RXDCTL11),
    igb_putreg(RXDCTL12),   igb_putreg(RXDCTL13),   igb_putreg(RXDCTL14),
    igb_putreg(RXDCTL15),
    igb_putreg(TCTL_EXT),
    igb_putreg(TDBAH10),    igb_putreg(TDBAH11),    igb_putreg(TDBAH12),
    igb_putreg(TDBAH13),    igb_putreg(TDBAH14),    igb_putreg(TDBAH15),
    igb_putreg(FUNCTAG),
    igb_putreg(TXDCTL0),    igb_putreg(TXDCTL1),    igb_putreg(TXDCTL2),
    igb_putreg(TXDCTL3),    igb_putreg(TXDCTL4),    igb_putreg(TXDCTL5),
    igb_putreg(TXDCTL6),    igb_putreg(TXDCTL7),    igb_putreg(TXDCTL8),
    igb_putreg(TXDCTL9),    igb_putreg(TXDCTL10),   igb_putreg(TXDCTL11),
    igb_putreg(TXDCTL12),   igb_putreg(TXDCTL13),   igb_putreg(TXDCTL14),
    igb_putreg(TXDCTL15),
    igb_putreg(TXCTL10),    igb_putreg(TXCTL11),    igb_putreg(TXCTL12),
    igb_putreg(TXCTL13),    igb_putreg(TXCTL14),    igb_putreg(TXCTL15),
    igb_putreg(TDWBAL0),    igb_putreg(TDWBAL1),    igb_putreg(TDWBAL2),
    igb_putreg(TDWBAL3),    igb_putreg(TDWBAL4),    igb_putreg(TDWBAL5),
    igb_putreg(TDWBAL6),    igb_putreg(TDWBAL7),    igb_putreg(TDWBAL8),
    igb_putreg(TDWBAL9),    igb_putreg(TDWBAL10),   igb_putreg(TDWBAL11),
    igb_putreg(TDWBAL12),   igb_putreg(TDWBAL13),   igb_putreg(TDWBAL14),
    igb_putreg(TDWBAL15),
    igb_putreg(TDWBAH0),    igb_putreg(TDWBAH1),    igb_putreg(TDWBAH2),
    igb_putreg(TDWBAH3),    igb_putreg(TDWBAH4),    igb_putreg(TDWBAH5),
    igb_putreg(TDWBAH6),    igb_putreg(TDWBAH7),    igb_putreg(TDWBAH8),
    igb_putreg(TDWBAH9),    igb_putreg(TDWBAH10),   igb_putreg(TDWBAH11),
    igb_putreg(TDWBAH12),   igb_putreg(TDWBAH13),   igb_putreg(TDWBAH14),
    igb_putreg(TDWBAH15),
    igb_putreg(RXSTMPH),    igb_putreg(RXSTMPL),
    igb_putreg(RXSATRL),    igb_putreg(RXSATRH),
    igb_putreg(TXSTMPL),    igb_putreg(TXSTMPH),
    igb_putreg(SYSTIML),    igb_putreg(SYSTIMH),
    igb_putreg(TIMADJL),
    igb_putreg(TSYNCRXCTL), igb_putreg(TSYNCTXCTL),
    igb_putreg(EEMNGCTL),

    [TDH0]  = igb_set_16bit, [TDH1]  = igb_set_16bit,
    [TDH2]  = igb_set_16bit, [TDH3]  = igb_set_16bit,
    [TDH4]  = igb_set_16bit, [TDH5]  = igb_set_16bit,
    [TDH6]  = igb_set_16bit, [TDH7]  = igb_set_16bit,
    [TDH8]  = igb_set_16bit, [TDH9]  = igb_set_16bit,
    [TDH10] = igb_set_16bit, [TDH11] = igb_set_16bit,
    [TDH12] = igb_set_16bit, [TDH13] = igb_set_16bit,
    [TDH14] = igb_set_16bit, [TDH15] = igb_set_16bit,
    [TDT0]  = igb_set_tdt,   [TDT1]  = igb_set_tdt,
    [TDT2]  = igb_set_tdt,   [TDT3]  = igb_set_tdt,
    [TDT4]  = igb_set_tdt,   [TDT5]  = igb_set_tdt,
    [TDT6]  = igb_set_tdt,   [TDT7]  = igb_set_tdt,
    [TDT8]  = igb_set_tdt,   [TDT9]  = igb_set_tdt,
    [TDT10] = igb_set_tdt,   [TDT11] = igb_set_tdt,
    [TDT12] = igb_set_tdt,   [TDT13] = igb_set_tdt,
    [TDT14] = igb_set_tdt,   [TDT15] = igb_set_tdt,
    [MDIC]  = igb_set_mdic,
    [ICS]   = igb_set_ics,
    [RDH0]  = igb_set_16bit, [RDH1]  = igb_set_16bit,
    [RDH2]  = igb_set_16bit, [RDH3]  = igb_set_16bit,
    [RDH4]  = igb_set_16bit, [RDH5]  = igb_set_16bit,
    [RDH6]  = igb_set_16bit, [RDH7]  = igb_set_16bit,
    [RDH8]  = igb_set_16bit, [RDH9]  = igb_set_16bit,
    [RDH10] = igb_set_16bit, [RDH11] = igb_set_16bit,
    [RDH12] = igb_set_16bit, [RDH13] = igb_set_16bit,
    [RDH14] = igb_set_16bit, [RDH15] = igb_set_16bit,
    [RDT0]  = igb_set_rdt,   [RDT1]  = igb_set_rdt,
    [RDT2]  = igb_set_rdt,   [RDT3]  = igb_set_rdt,
    [RDT4]  = igb_set_rdt,   [RDT5]  = igb_set_rdt,
    [RDT6]  = igb_set_rdt,   [RDT7]  = igb_set_rdt,
    [RDT8]  = igb_set_rdt,   [RDT9]  = igb_set_rdt,
    [RDT10] = igb_set_rdt,   [RDT11] = igb_set_rdt,
    [RDT12] = igb_set_rdt,   [RDT13] = igb_set_rdt,
    [RDT14] = igb_set_rdt,   [RDT15] = igb_set_rdt,
    [IMC]   = igb_set_imc,
    [IMS]   = igb_set_ims,
    [ICR]   = igb_set_icr,
    [EECD]  = igb_set_eecd,
    [RCTL]  = igb_set_rx_control,
    [CTRL]  = igb_set_ctrl,
    [EERD]  = igb_set_eerd,
    [TDFH]  = igb_set_13bit, [TDFT]  = igb_set_13bit,
    [TDFHS] = igb_set_13bit, [TDFTS] = igb_set_13bit,
    [TDFPC] = igb_set_13bit,
    [RDFH]  = igb_set_13bit, [RDFT]  = igb_set_13bit,
    [RDFHS] = igb_set_13bit, [RDFTS] = igb_set_13bit,
    [RDFPC] = igb_set_13bit,
    [GCR]    = igb_set_gcr,
    [RXCSUM] = igb_set_rxcsum,
    [TDLEN0]  = igb_set_dlen, [TDLEN1]  = igb_set_dlen,
    [TDLEN2]  = igb_set_dlen, [TDLEN3]  = igb_set_dlen,
    [TDLEN4]  = igb_set_dlen, [TDLEN5]  = igb_set_dlen,
    [TDLEN6]  = igb_set_dlen, [TDLEN7]  = igb_set_dlen,
    [TDLEN8]  = igb_set_dlen, [TDLEN9]  = igb_set_dlen,
    [TDLEN10] = igb_set_dlen, [TDLEN11] = igb_set_dlen,
    [TDLEN12] = igb_set_dlen, [TDLEN13] = igb_set_dlen,
    [TDLEN14] = igb_set_dlen, [TDLEN15] = igb_set_dlen,
    [RDLEN0]  = igb_set_dlen, [RDLEN1]  = igb_set_dlen,
    [RDLEN2]  = igb_set_dlen, [RDLEN3]  = igb_set_dlen,
    [RDLEN4]  = igb_set_dlen, [RDLEN5]  = igb_set_dlen,
    [RDLEN6]  = igb_set_dlen, [RDLEN7]  = igb_set_dlen,
    [RDLEN8]  = igb_set_dlen, [RDLEN9]  = igb_set_dlen,
    [RDLEN10] = igb_set_dlen, [RDLEN11] = igb_set_dlen,
    [RDLEN12] = igb_set_dlen, [RDLEN13] = igb_set_dlen,
    [RDLEN14] = igb_set_dlen, [RDLEN15] = igb_set_dlen,
    [TDBAL0]  = igb_set_dbal, [TDBAL1]  = igb_set_dbal,
    [TDBAL2]  = igb_set_dbal, [TDBAL3]  = igb_set_dbal,
    [TDBAL4]  = igb_set_dbal, [TDBAL5]  = igb_set_dbal,
    [TDBAL6]  = igb_set_dbal, [TDBAL7]  = igb_set_dbal,
    [TDBAL8]  = igb_set_dbal, [TDBAL9]  = igb_set_dbal,
    [TDBAL10] = igb_set_dbal, [TDBAL11] = igb_set_dbal,
    [TDBAL12] = igb_set_dbal, [TDBAL13] = igb_set_dbal,
    [TDBAL14] = igb_set_dbal, [TDBAL15] = igb_set_dbal,
    [RDBAL0]  = igb_set_dbal, [RDBAL1]  = igb_set_dbal,
    [RDBAL2]  = igb_set_dbal, [RDBAL3]  = igb_set_dbal,
    [RDBAL4]  = igb_set_dbal, [RDBAL5]  = igb_set_dbal,
    [RDBAL6]  = igb_set_dbal, [RDBAL7]  = igb_set_dbal,
    [RDBAL8]  = igb_set_dbal, [RDBAL9]  = igb_set_dbal,
    [RDBAL10] = igb_set_dbal, [RDBAL11] = igb_set_dbal,
    [RDBAL12] = igb_set_dbal, [RDBAL13] = igb_set_dbal,
    [RDBAL14] = igb_set_dbal, [RDBAL15] = igb_set_dbal,
    [STATUS]   = igb_set_status,
    [PBACLR]   = igb_set_pbaclr,
    [CTRL_EXT] = igb_set_ctrlext,
    [FCAH]     = igb_set_16bit,
    [FCT]      = igb_set_16bit,
    [FCTTV]    = igb_set_16bit,
    [FCRTV]    = igb_set_16bit,
    [FCRTH]    = igb_set_fcrth,
    [FCRTL]    = igb_set_fcrtl,
    [CTRL_DUP] = igb_set_ctrl,
    [RFCTL]    = igb_set_rfctl,
    [TIMINCA]  = igb_set_timinca,
    [TIMADJH]  = igb_set_timadjh,

    [IP6AT ... IP6AT + 3]    = igb_mac_writereg,
    [IP4AT ... IP4AT + 6]    = igb_mac_writereg,
    [RA]                     = igb_mac_writereg,
    [RA + 1]                 = igb_mac_setmacaddr,
    [RA + 2 ... RA + 31]     = igb_mac_writereg,
    [RA2 ... RA2 + 31]       = igb_mac_writereg,
    [WUPM ... WUPM + 31]     = igb_mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1]            = igb_mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg,
    [FFMT ... FFMT + 254]    = igb_set_4bit,
    [MDEF ... MDEF + 7]      = igb_mac_writereg,
    [FTFT ... FTFT + 254]    = igb_mac_writereg,
    [RETA ... RETA + 31]     = igb_mac_writereg,
    [RSSRK ... RSSRK + 9]    = igb_mac_writereg,
    [MAVTV0 ... MAVTV3]      = igb_mac_writereg,
    [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr,
    [FWSM]       = igb_mac_writereg,
    [SW_FW_SYNC] = igb_mac_writereg,
    [EICR] = igb_set_eicr,
    [EICS] = igb_set_eics,
    [EIAC] = igb_set_eiac,
    [EIAM] = igb_set_eiam,
    [EIMC] = igb_set_eimc,
    [EIMS] = igb_set_eims,
    [IVAR0 ... IVAR0 + 7] = igb_mac_writereg,
    igb_putreg(IVAR_MISC),
    igb_putreg(TSYNCRXCFG),
    [ETQF0 ... ETQF0 + 7] = igb_mac_writereg,
    [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox,
    [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox,
    [MBVFICR] = igb_w1c,
    [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg,
    igb_putreg(MBVFIMR),
    [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg,
    [VMVIR0 ... VMVIR7] = igb_mac_writereg,
    [VMOLR0 ... VMOLR7] = igb_mac_writereg,
    [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    [PVTCTRL0] = igb_set_vtctrl, [PVTCTRL1] = igb_set_vtctrl,
    [PVTCTRL2] = igb_set_vtctrl, [PVTCTRL3] = igb_set_vtctrl,
    [PVTCTRL4] = igb_set_vtctrl, [PVTCTRL5] = igb_set_vtctrl,
    [PVTCTRL6] = igb_set_vtctrl, [PVTCTRL7] = igb_set_vtctrl,
    [PVTEICS0] = igb_set_vteics, [PVTEICS1] = igb_set_vteics,
    [PVTEICS2] = igb_set_vteics, [PVTEICS3] = igb_set_vteics,
    [PVTEICS4] = igb_set_vteics, [PVTEICS5] = igb_set_vteics,
    [PVTEICS6] = igb_set_vteics, [PVTEICS7] = igb_set_vteics,
    [PVTEIMS0] = igb_set_vteims, [PVTEIMS1] = igb_set_vteims,
    [PVTEIMS2] = igb_set_vteims, [PVTEIMS3] = igb_set_vteims,
    [PVTEIMS4] = igb_set_vteims, [PVTEIMS5] = igb_set_vteims,
    [PVTEIMS6] = igb_set_vteims, [PVTEIMS7] = igb_set_vteims,
    [PVTEIMC0] = igb_set_vteimc, [PVTEIMC1] = igb_set_vteimc,
    [PVTEIMC2] = igb_set_vteimc, [PVTEIMC3] = igb_set_vteimc,
    [PVTEIMC4] = igb_set_vteimc, [PVTEIMC5] = igb_set_vteimc,
    [PVTEIMC6] = igb_set_vteimc, [PVTEIMC7] = igb_set_vteimc,
    [PVTEIAC0] = igb_set_vteiac, [PVTEIAC1] = igb_set_vteiac,
    [PVTEIAC2] = igb_set_vteiac, [PVTEIAC3] = igb_set_vteiac,
    [PVTEIAC4] = igb_set_vteiac, [PVTEIAC5] = igb_set_vteiac,
    [PVTEIAC6] = igb_set_vteiac, [PVTEIAC7] = igb_set_vteiac,
    [PVTEIAM0] = igb_set_vteiam, [PVTEIAM1] = igb_set_vteiam,
    [PVTEIAM2] = igb_set_vteiam, [PVTEIAM3] = igb_set_vteiam,
    [PVTEIAM4] = igb_set_vteiam, [PVTEIAM5] = igb_set_vteiam,
    [PVTEIAM6] = igb_set_vteiam, [PVTEIAM7] = igb_set_vteiam,
    [PVTEICR0] = igb_set_vteicr, [PVTEICR1] = igb_set_vteicr,
    [PVTEICR2] = igb_set_vteicr, [PVTEICR3] = igb_set_vteicr,
    [PVTEICR4] = igb_set_vteicr, [PVTEICR5] = igb_set_vteicr,
    [PVTEICR6] = igb_set_vteicr, [PVTEICR7] = igb_set_vteicr,
    [VTIVAR ... VTIVAR + 7]           = igb_set_vtivar,
    [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg
};
enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) };
enum { MAC_ACCESS_PARTIAL = 1 };

/*
 * The array below combines alias offsets of the index values for the
 * MAC registers that have aliases, with the indication of not fully
 * implemented registers (lowest bit). This combination is possible
 * because all of the offsets are even.
 */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [RDFH_A]  = 0xe904, [RDFT_A]  = 0xe904,
    [TDFH_A]  = 0xed00, [TDFT_A]  = 0xed00,
    [RA_A ... RA_A + 31]                                  = 0x14f0,
    [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1]  = 0x1400,

    [RDBAL0_A]  = 0x2600, [RDBAH0_A]  = 0x2600, [RDLEN0_A] = 0x2600,
    [SRRCTL0_A] = 0x2600, [RXDCTL0_A] = 0x2600, [RXCTL0_A] = 0x2600,
    [RQDPC0_A]  = 0x2600,
    [RDBAL1_A]  = 0x25D0, [RDBAL2_A]  = 0x25A0, [RDBAL3_A]  = 0x2570,
    [RDBAH1_A]  = 0x25D0, [RDBAH2_A]  = 0x25A0, [RDBAH3_A]  = 0x2570,
    [RDLEN1_A]  = 0x25D0, [RDLEN2_A]  = 0x25A0, [RDLEN3_A]  = 0x2570,
    [SRRCTL1_A] = 0x25D0, [SRRCTL2_A] = 0x25A0, [SRRCTL3_A] = 0x2570,
    [RXDCTL1_A] = 0x25D0, [RXDCTL2_A] = 0x25A0, [RXDCTL3_A] = 0x2570,
    [RXCTL1_A]  = 0x25D0, [RXCTL2_A]  = 0x25A0, [RXCTL3_A]  = 0x2570,
    [RQDPC1_A]  = 0x25D0, [RQDPC2_A]  = 0x25A0, [RQDPC3_A]  = 0x2570,
    [TDBAL0_A]  = 0x2A00, [TDBAH0_A]  = 0x2A00, [TDLEN0_A]  = 0x2A00,
    [TXCTL0_A]  = 0x2A00, [TDWBAL0_A] = 0x2A00, [TDWBAH0_A] = 0x2A00,
    [TDBAL1_A]  = 0x29D0, [TDBAL2_A]  = 0x29A0, [TDBAL3_A]  = 0x2970,
    [TDBAH1_A]  = 0x29D0, [TDBAH2_A]  = 0x29A0, [TDBAH3_A]  = 0x2970,
    [TDLEN1_A]  = 0x29D0, [TDLEN2_A]  = 0x29A0, [TDLEN3_A]  = 0x2970,
    [TXDCTL0_A] = 0x2A00, [TXDCTL1_A] = 0x29D0,
    [TXDCTL2_A] = 0x29A0, [TXDCTL3_A] = 0x2970,
    [TXCTL1_A]  = 0x29D0, [TXCTL2_A]  = 0x29A0, [TXCTL3_A]  = 0x29D0,
    [TDWBAL1_A] = 0x29D0, [TDWBAL2_A] = 0x29A0, [TDWBAL3_A] = 0x2970,
    [TDWBAH1_A] = 0x29D0, [TDWBAH2_A] = 0x29A0, [TDWBAH3_A] = 0x2970,

    /* Access options */
    [RDFH]  = MAC_ACCESS_PARTIAL, [RDFT]  = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH]  = MAC_ACCESS_PARTIAL, [TDFT]  = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL, [EECD]  = MAC_ACCESS_PARTIAL,
    [FLA]   = MAC_ACCESS_PARTIAL,
    [FCAL]  = MAC_ACCESS_PARTIAL, [FCAH]  = MAC_ACCESS_PARTIAL,
    [FCT]   = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};
void
igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        igb_macreg_writeops[index](core, index, val);
    } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}

uint64_t
igb_core_read(IGBCore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = igb_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }

    return 0;
}

static void
igb_autoneg_resume(IGBCore *core)
{
    if (igb_have_autoneg(core) &&
        !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
void
igb_core_pci_realize(IGBCore        *core,
                     const uint16_t *eeprom_templ,
                     uint32_t        eeprom_size,
                     const uint8_t  *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       igb_autoneg_timer, core);
    igb_intrmgr_pci_realize(core);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
    }

    net_rx_pkt_init(&core->rx_pkt);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    igb_update_rx_offloads(core);
}

void
igb_core_pci_uninit(IGBCore *core)
{
    int i;

    timer_free(core->autoneg_timer);

    igb_intrmgr_pci_unint(core);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}
static const uint16_t
igb_phy_reg_init[] = {
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD        |
                 MII_BMCR_AUTOEN,

    [MII_BMSR] = MII_BMSR_EXTCAP    |
                 MII_BMSR_LINK_ST   |
                 MII_BMSR_AUTONEG   |
                 MII_BMSR_MFPS      |
                 MII_BMSR_EXTSTAT   |
                 MII_BMSR_10T_HD    |
                 MII_BMSR_10T_FD    |
                 MII_BMSR_100TX_HD  |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1]   = IGP03E1000_E_PHY_ID >> 16,
    [MII_PHYID2]   = (IGP03E1000_E_PHY_ID & 0xfff0) | 1,
    [MII_ANAR]     = MII_ANAR_CSMACD | MII_ANAR_10 |
                     MII_ANAR_10FD | MII_ANAR_TX |
                     MII_ANAR_TXFD | MII_ANAR_PAUSE |
                     MII_ANAR_PAUSE_ASYM,
    [MII_ANLPAR]   = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                     MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                     MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
    [MII_ANER]     = MII_ANER_NP | MII_ANER_NWAY,
    [MII_ANNP]     = 0x1 | MII_ANNP_MP,
    [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                     MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
    [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
                     MII_STAT1000_ROK | MII_STAT1000_LOK,
    [MII_EXTSTAT]  = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

    [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8),
    [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS,
    [IGP02E1000_PHY_POWER_MGMT]  = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU |
                                   IGP01E1000_PSCFR_SMART_SPEED
};
static const uint32_t igb_mac_reg_init[] = {
    [LEDCTL]   = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
    [EEMNGCTL] = BIT(31),
    [TXDCTL0]  = E1000_TXDCTL_QUEUE_ENABLE,
    [RXDCTL0]  = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
    [RXDCTL1]  = 1 << 16, [RXDCTL2]  = 1 << 16, [RXDCTL3]  = 1 << 16,
    [RXDCTL4]  = 1 << 16, [RXDCTL5]  = 1 << 16, [RXDCTL6]  = 1 << 16,
    [RXDCTL7]  = 1 << 16, [RXDCTL8]  = 1 << 16, [RXDCTL9]  = 1 << 16,
    [RXDCTL10] = 1 << 16, [RXDCTL11] = 1 << 16, [RXDCTL12] = 1 << 16,
    [RXDCTL13] = 1 << 16, [RXDCTL14] = 1 << 16, [RXDCTL15] = 1 << 16,
    [TIPG]     = 0x08 | (0x04 << 10) | (0x06 << 20),
    [CTRL]     = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 |
                 E1000_CTRL_ADVD3WUC,
    [STATUS]   = E1000_STATUS_PHYRA | BIT(31),
    [EECD]     = E1000_EECD_FWE_DIS | E1000_EECD_PRES |
                 (2 << E1000_EECD_SIZE_EX_SHIFT),
    [GCR]      = E1000_L0S_ADJUST |
                 E1000_GCR_CMPL_TMOUT_RESEND |
                 E1000_GCR_CAP_VER2 |
                 E1000_L1_ENTRY_LATENCY_MSB |
                 E1000_L1_ENTRY_LATENCY_LSB,
    [RXCSUM]   = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [TCTL]     = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) |
                 (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28),
    [TCTL_EXT] = 0x40 | (0x42 << 10),
    [DTXCTL]   = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT,
    [VET]      = ETH_P_VLAN | (ETH_P_VLAN << 16),

    [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] =
                 E1000_V2PMAILBOX_RSTI,
    [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC,
    [RPLOLR]   = E1000_RPLOLR_STRCRC,
    [TXCTL0]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL1]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL2]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL3]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL4]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL5]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL6]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL7]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL8]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL9]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL10]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL11]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL12]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL13]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL14]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL15]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN | E1000_DCA_TXCTRL_DESC_RRO_EN,
};
static void igb_reset(IGBCore *core, bool sw)
{
    struct igb_tx *tx;
    int i;

    timer_del(core->autoneg_timer);

    igb_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init);

    for (i = 0; i < E1000E_MAC_SIZE; i++) {
        if (sw &&
            (i == RXPBS || i == TXPBS ||
             (i >= EITR0 && i < EITR0 + IGB_INTR_NUM))) {
            continue;
        }

        core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ?
                       igb_mac_reg_init[i] : 0;
    }

    if (qemu_get_queue(core->owner_nic)->link_down) {
        igb_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
        /* Set RSTI, so VF can identify a PF reset is in progress */
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI;
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        tx = &core->tx[i];
        memset(tx->ctx, 0, sizeof(tx->ctx));
        tx->first = true;
        tx->skip_cp = false;
    }
}

void
igb_core_reset(IGBCore *core)
{
    igb_reset(core, false);
}
void igb_core_pre_save(IGBCore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && igb_have_autoneg(core)) {
        core->phy[MII_BMSR] |= MII_BMSR_AN_COMP;
        igb_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
igb_core_post_load(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down according
     * to link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    /*
     * we need to restart intrmgr timers, as an older version of
     * QEMU can have stopped them before migration
     */
    igb_intrmgr_resume(core);
    igb_autoneg_resume(core