2 * Core code for QEMU igb emulation
5 * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
7 * Copyright (c) 2020-2023 Red Hat, Inc.
8 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
9 * Developed by Daynix Computing LTD (http://www.daynix.com)
12 * Akihiko Odaki <akihiko.odaki@daynix.com>
 13 * Gal Hammer <gal.hammer@sap.com>
14 * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
15 * Dmitry Fleytman <dmitry@daynix.com>
16 * Leonid Bloch <leonid@daynix.com>
17 * Yan Vugenfirer <yan@daynix.com>
19 * Based on work done by:
20 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
21 * Copyright (c) 2008 Qumranet
22 * Based on work done by:
23 * Copyright (c) 2007 Dan Aloni
24 * Copyright (c) 2004 Antony T Curtis
26 * This library is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU Lesser General Public
28 * License as published by the Free Software Foundation; either
29 * version 2.1 of the License, or (at your option) any later version.
31 * This library is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
34 * Lesser General Public License for more details.
36 * You should have received a copy of the GNU Lesser General Public
37 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
40 #include "qemu/osdep.h"
44 #include "hw/net/mii.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "sysemu/runstate.h"
49 #include "net_tx_pkt.h"
50 #include "net_rx_pkt.h"
52 #include "igb_common.h"
53 #include "e1000x_common.h"
58 #define E1000E_MAX_TX_FRAGS (64)
60 union e1000_rx_desc_union
{
61 struct e1000_rx_desc legacy
;
62 union e1000_adv_rx_desc adv
;
65 typedef struct IGBTxPktVmdqCallbackContext
{
68 } IGBTxPktVmdqCallbackContext
;
70 typedef struct L2Header
{
71 struct eth_header eth
;
72 struct vlan_header vlan
[2];
76 uint8_t message_id_transport_specific
;
78 uint16_t message_length
;
79 uint8_t subdomain_number
;
84 uint8_t source_communication_technology
;
85 uint32_t source_uuid_lo
;
86 uint16_t source_uuid_hi
;
87 uint16_t source_port_id
;
90 uint8_t log_message_period
;
94 igb_receive_internal(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
,
95 bool has_vnet
, bool *external_tx
);
97 static void igb_raise_interrupts(IGBCore
*core
, size_t index
, uint32_t causes
);
98 static void igb_reset(IGBCore
*core
, bool sw
);
101 igb_raise_legacy_irq(IGBCore
*core
)
103 trace_e1000e_irq_legacy_notify(true);
104 e1000x_inc_reg_if_not_full(core
->mac
, IAC
);
105 pci_set_irq(core
->owner
, 1);
109 igb_lower_legacy_irq(IGBCore
*core
)
111 trace_e1000e_irq_legacy_notify(false);
112 pci_set_irq(core
->owner
, 0);
115 static void igb_msix_notify(IGBCore
*core
, unsigned int cause
)
117 PCIDevice
*dev
= core
->owner
;
119 uint32_t effective_eiac
;
122 vfn
= 8 - (cause
+ 2) / IGBVF_MSIX_VEC_NUM
;
123 if (vfn
< pcie_sriov_num_vfs(core
->owner
)) {
124 dev
= pcie_sriov_get_vf_at_index(core
->owner
, vfn
);
126 vector
= (cause
+ 2) % IGBVF_MSIX_VEC_NUM
;
127 } else if (cause
>= IGB_MSIX_VEC_NUM
) {
128 qemu_log_mask(LOG_GUEST_ERROR
,
129 "igb: Tried to use vector unavailable for PF");
135 msix_notify(dev
, vector
);
137 trace_e1000e_irq_icr_clear_eiac(core
->mac
[EICR
], core
->mac
[EIAC
]);
138 effective_eiac
= core
->mac
[EIAC
] & BIT(cause
);
139 core
->mac
[EICR
] &= ~effective_eiac
;
143 igb_intrmgr_rearm_timer(IGBIntrDelayTimer
*timer
)
145 int64_t delay_ns
= (int64_t) timer
->core
->mac
[timer
->delay_reg
] *
146 timer
->delay_resolution_ns
;
148 trace_e1000e_irq_rearm_timer(timer
->delay_reg
<< 2, delay_ns
);
150 timer_mod(timer
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + delay_ns
);
152 timer
->running
= true;
156 igb_intmgr_timer_resume(IGBIntrDelayTimer
*timer
)
158 if (timer
->running
) {
159 igb_intrmgr_rearm_timer(timer
);
164 igb_intmgr_timer_pause(IGBIntrDelayTimer
*timer
)
166 if (timer
->running
) {
167 timer_del(timer
->timer
);
172 igb_intrmgr_on_msix_throttling_timer(void *opaque
)
174 IGBIntrDelayTimer
*timer
= opaque
;
175 int idx
= timer
- &timer
->core
->eitr
[0];
177 timer
->running
= false;
179 trace_e1000e_irq_msix_notify_postponed_vec(idx
);
180 igb_msix_notify(timer
->core
, idx
);
184 igb_intrmgr_initialize_all_timers(IGBCore
*core
, bool create
)
188 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
189 core
->eitr
[i
].core
= core
;
190 core
->eitr
[i
].delay_reg
= EITR0
+ i
;
191 core
->eitr
[i
].delay_resolution_ns
= E1000_INTR_DELAY_NS_RES
;
198 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
199 core
->eitr
[i
].timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
200 igb_intrmgr_on_msix_throttling_timer
,
206 igb_intrmgr_resume(IGBCore
*core
)
210 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
211 igb_intmgr_timer_resume(&core
->eitr
[i
]);
216 igb_intrmgr_pause(IGBCore
*core
)
220 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
221 igb_intmgr_timer_pause(&core
->eitr
[i
]);
226 igb_intrmgr_reset(IGBCore
*core
)
230 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
231 if (core
->eitr
[i
].running
) {
232 timer_del(core
->eitr
[i
].timer
);
233 igb_intrmgr_on_msix_throttling_timer(&core
->eitr
[i
]);
239 igb_intrmgr_pci_unint(IGBCore
*core
)
243 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
244 timer_free(core
->eitr
[i
].timer
);
249 igb_intrmgr_pci_realize(IGBCore
*core
)
251 igb_intrmgr_initialize_all_timers(core
, true);
255 igb_rx_csum_enabled(IGBCore
*core
)
257 return (core
->mac
[RXCSUM
] & E1000_RXCSUM_PCSD
) ? false : true;
261 igb_rx_use_legacy_descriptor(IGBCore
*core
)
264 * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
270 typedef struct E1000ERingInfo
{
280 igb_rx_queue_desctyp_get(IGBCore
*core
, const E1000ERingInfo
*r
)
282 return core
->mac
[E1000_SRRCTL(r
->idx
) >> 2] & E1000_SRRCTL_DESCTYPE_MASK
;
286 igb_rx_use_ps_descriptor(IGBCore
*core
, const E1000ERingInfo
*r
)
288 uint32_t desctyp
= igb_rx_queue_desctyp_get(core
, r
);
289 return desctyp
== E1000_SRRCTL_DESCTYPE_HDR_SPLIT
||
290 desctyp
== E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS
;
294 igb_rss_enabled(IGBCore
*core
)
296 return (core
->mac
[MRQC
] & 3) == E1000_MRQC_ENABLE_RSS_MQ
&&
297 !igb_rx_csum_enabled(core
) &&
298 !igb_rx_use_legacy_descriptor(core
);
301 typedef struct E1000E_RSSInfo_st
{
309 igb_rss_get_hash_type(IGBCore
*core
, struct NetRxPkt
*pkt
)
312 EthL4HdrProto l4hdr_proto
;
314 assert(igb_rss_enabled(core
));
316 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
319 trace_e1000e_rx_rss_ip4(l4hdr_proto
, core
->mac
[MRQC
],
320 E1000_MRQC_EN_TCPIPV4(core
->mac
[MRQC
]),
321 E1000_MRQC_EN_IPV4(core
->mac
[MRQC
]));
323 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&&
324 E1000_MRQC_EN_TCPIPV4(core
->mac
[MRQC
])) {
325 return E1000_MRQ_RSS_TYPE_IPV4TCP
;
328 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
&&
329 (core
->mac
[MRQC
] & E1000_MRQC_RSS_FIELD_IPV4_UDP
)) {
330 return E1000_MRQ_RSS_TYPE_IPV4UDP
;
333 if (E1000_MRQC_EN_IPV4(core
->mac
[MRQC
])) {
334 return E1000_MRQ_RSS_TYPE_IPV4
;
337 eth_ip6_hdr_info
*ip6info
= net_rx_pkt_get_ip6_info(pkt
);
339 bool ex_dis
= core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_EX_DIS
;
340 bool new_ex_dis
= core
->mac
[RFCTL
] & E1000_RFCTL_NEW_IPV6_EXT_DIS
;
343 * Following two traces must not be combined because resulting
344 * event will have 11 arguments totally and some trace backends
345 * (at least "ust") have limitation of maximum 10 arguments per
346 * event. Events with more arguments fail to compile for
347 * backends like these.
349 trace_e1000e_rx_rss_ip6_rfctl(core
->mac
[RFCTL
]);
350 trace_e1000e_rx_rss_ip6(ex_dis
, new_ex_dis
, l4hdr_proto
,
351 ip6info
->has_ext_hdrs
,
352 ip6info
->rss_ex_dst_valid
,
353 ip6info
->rss_ex_src_valid
,
355 E1000_MRQC_EN_TCPIPV6EX(core
->mac
[MRQC
]),
356 E1000_MRQC_EN_IPV6EX(core
->mac
[MRQC
]),
357 E1000_MRQC_EN_IPV6(core
->mac
[MRQC
]));
359 if ((!ex_dis
|| !ip6info
->has_ext_hdrs
) &&
360 (!new_ex_dis
|| !(ip6info
->rss_ex_dst_valid
||
361 ip6info
->rss_ex_src_valid
))) {
363 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&&
364 E1000_MRQC_EN_TCPIPV6EX(core
->mac
[MRQC
])) {
365 return E1000_MRQ_RSS_TYPE_IPV6TCPEX
;
368 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
&&
369 (core
->mac
[MRQC
] & E1000_MRQC_RSS_FIELD_IPV6_UDP
)) {
370 return E1000_MRQ_RSS_TYPE_IPV6UDP
;
373 if (E1000_MRQC_EN_IPV6EX(core
->mac
[MRQC
])) {
374 return E1000_MRQ_RSS_TYPE_IPV6EX
;
379 if (E1000_MRQC_EN_IPV6(core
->mac
[MRQC
])) {
380 return E1000_MRQ_RSS_TYPE_IPV6
;
385 return E1000_MRQ_RSS_TYPE_NONE
;
389 igb_rss_calc_hash(IGBCore
*core
, struct NetRxPkt
*pkt
, E1000E_RSSInfo
*info
)
391 NetRxPktRssType type
;
393 assert(igb_rss_enabled(core
));
395 switch (info
->type
) {
396 case E1000_MRQ_RSS_TYPE_IPV4
:
397 type
= NetPktRssIpV4
;
399 case E1000_MRQ_RSS_TYPE_IPV4TCP
:
400 type
= NetPktRssIpV4Tcp
;
402 case E1000_MRQ_RSS_TYPE_IPV6TCPEX
:
403 type
= NetPktRssIpV6TcpEx
;
405 case E1000_MRQ_RSS_TYPE_IPV6
:
406 type
= NetPktRssIpV6
;
408 case E1000_MRQ_RSS_TYPE_IPV6EX
:
409 type
= NetPktRssIpV6Ex
;
411 case E1000_MRQ_RSS_TYPE_IPV4UDP
:
412 type
= NetPktRssIpV4Udp
;
414 case E1000_MRQ_RSS_TYPE_IPV6UDP
:
415 type
= NetPktRssIpV6Udp
;
422 return net_rx_pkt_calc_rss_hash(pkt
, type
, (uint8_t *) &core
->mac
[RSSRK
]);
426 igb_rss_parse_packet(IGBCore
*core
, struct NetRxPkt
*pkt
, bool tx
,
427 E1000E_RSSInfo
*info
)
429 trace_e1000e_rx_rss_started();
431 if (tx
|| !igb_rss_enabled(core
)) {
432 info
->enabled
= false;
436 trace_e1000e_rx_rss_disabled();
440 info
->enabled
= true;
442 info
->type
= igb_rss_get_hash_type(core
, pkt
);
444 trace_e1000e_rx_rss_type(info
->type
);
446 if (info
->type
== E1000_MRQ_RSS_TYPE_NONE
) {
452 info
->hash
= igb_rss_calc_hash(core
, pkt
, info
);
453 info
->queue
= E1000_RSS_QUEUE(&core
->mac
[RETA
], info
->hash
);
457 igb_tx_insert_vlan(IGBCore
*core
, uint16_t qn
, struct igb_tx
*tx
,
458 uint16_t vlan
, bool insert_vlan
)
460 if (core
->mac
[MRQC
] & 1) {
461 uint16_t pool
= qn
% IGB_NUM_VM_POOLS
;
463 if (core
->mac
[VMVIR0
+ pool
] & E1000_VMVIR_VLANA_DEFAULT
) {
464 /* always insert default VLAN */
466 vlan
= core
->mac
[VMVIR0
+ pool
] & 0xffff;
467 } else if (core
->mac
[VMVIR0
+ pool
] & E1000_VMVIR_VLANA_NEVER
) {
473 net_tx_pkt_setup_vlan_header_ex(tx
->tx_pkt
, vlan
,
474 core
->mac
[VET
] & 0xffff);
479 igb_setup_tx_offloads(IGBCore
*core
, struct igb_tx
*tx
)
481 uint32_t idx
= (tx
->first_olinfo_status
>> 4) & 1;
483 if (tx
->first_cmd_type_len
& E1000_ADVTXD_DCMD_TSE
) {
484 uint32_t mss
= tx
->ctx
[idx
].mss_l4len_idx
>> E1000_ADVTXD_MSS_SHIFT
;
485 if (!net_tx_pkt_build_vheader(tx
->tx_pkt
, true, true, mss
)) {
489 net_tx_pkt_update_ip_checksums(tx
->tx_pkt
);
490 e1000x_inc_reg_if_not_full(core
->mac
, TSCTC
);
494 if ((tx
->first_olinfo_status
& E1000_ADVTXD_POTS_TXSM
) &&
495 !((tx
->ctx
[idx
].type_tucmd_mlhl
& E1000_ADVTXD_TUCMD_L4T_SCTP
) ?
496 net_tx_pkt_update_sctp_checksum(tx
->tx_pkt
) :
497 net_tx_pkt_build_vheader(tx
->tx_pkt
, false, true, 0))) {
501 if (tx
->first_olinfo_status
& E1000_ADVTXD_POTS_IXSM
) {
502 net_tx_pkt_update_ip_hdr_checksum(tx
->tx_pkt
);
508 static void igb_tx_pkt_mac_callback(void *core
,
509 const struct iovec
*iov
,
511 const struct iovec
*virt_iov
,
514 igb_receive_internal(core
, virt_iov
, virt_iovcnt
, true, NULL
);
517 static void igb_tx_pkt_vmdq_callback(void *opaque
,
518 const struct iovec
*iov
,
520 const struct iovec
*virt_iov
,
523 IGBTxPktVmdqCallbackContext
*context
= opaque
;
526 igb_receive_internal(context
->core
, virt_iov
, virt_iovcnt
, true,
530 if (context
->core
->has_vnet
) {
531 qemu_sendv_packet(context
->nc
, virt_iov
, virt_iovcnt
);
533 qemu_sendv_packet(context
->nc
, iov
, iovcnt
);
538 /* TX Packets Switching (7.10.3.6) */
539 static bool igb_tx_pkt_switch(IGBCore
*core
, struct igb_tx
*tx
,
542 IGBTxPktVmdqCallbackContext context
;
544 /* TX switching is only used to serve VM to VM traffic. */
545 if (!(core
->mac
[MRQC
] & 1)) {
549 /* TX switching requires DTXSWC.Loopback_en bit enabled. */
550 if (!(core
->mac
[DTXSWC
] & E1000_DTXSWC_VMDQ_LOOPBACK_EN
)) {
557 return net_tx_pkt_send_custom(tx
->tx_pkt
, false,
558 igb_tx_pkt_vmdq_callback
, &context
);
561 return net_tx_pkt_send(tx
->tx_pkt
, nc
);
565 igb_tx_pkt_send(IGBCore
*core
, struct igb_tx
*tx
, int queue_index
)
567 int target_queue
= MIN(core
->max_queue_num
, queue_index
);
568 NetClientState
*queue
= qemu_get_subqueue(core
->owner_nic
, target_queue
);
570 if (!igb_setup_tx_offloads(core
, tx
)) {
574 net_tx_pkt_dump(tx
->tx_pkt
);
576 if ((core
->phy
[MII_BMCR
] & MII_BMCR_LOOPBACK
) ||
577 ((core
->mac
[RCTL
] & E1000_RCTL_LBM_MAC
) == E1000_RCTL_LBM_MAC
)) {
578 return net_tx_pkt_send_custom(tx
->tx_pkt
, false,
579 igb_tx_pkt_mac_callback
, core
);
581 return igb_tx_pkt_switch(core
, tx
, queue
);
586 igb_on_tx_done_update_stats(IGBCore
*core
, struct NetTxPkt
*tx_pkt
, int qn
)
588 static const int PTCregs
[6] = { PTC64
, PTC127
, PTC255
, PTC511
,
591 size_t tot_len
= net_tx_pkt_get_total_len(tx_pkt
) + 4;
593 e1000x_increase_size_stats(core
->mac
, PTCregs
, tot_len
);
594 e1000x_inc_reg_if_not_full(core
->mac
, TPT
);
595 e1000x_grow_8reg_if_not_full(core
->mac
, TOTL
, tot_len
);
597 switch (net_tx_pkt_get_packet_type(tx_pkt
)) {
599 e1000x_inc_reg_if_not_full(core
->mac
, BPTC
);
602 e1000x_inc_reg_if_not_full(core
->mac
, MPTC
);
607 g_assert_not_reached();
610 e1000x_inc_reg_if_not_full(core
->mac
, GPTC
);
611 e1000x_grow_8reg_if_not_full(core
->mac
, GOTCL
, tot_len
);
613 if (core
->mac
[MRQC
] & 1) {
614 uint16_t pool
= qn
% IGB_NUM_VM_POOLS
;
616 core
->mac
[PVFGOTC0
+ (pool
* 64)] += tot_len
;
617 core
->mac
[PVFGPTC0
+ (pool
* 64)]++;
622 igb_process_tx_desc(IGBCore
*core
,
625 union e1000_adv_tx_desc
*tx_desc
,
628 struct e1000_adv_tx_context_desc
*tx_ctx_desc
;
629 uint32_t cmd_type_len
;
631 uint64_t buffer_addr
;
634 cmd_type_len
= le32_to_cpu(tx_desc
->read
.cmd_type_len
);
636 if (cmd_type_len
& E1000_ADVTXD_DCMD_DEXT
) {
637 if ((cmd_type_len
& E1000_ADVTXD_DTYP_DATA
) ==
638 E1000_ADVTXD_DTYP_DATA
) {
639 /* advanced transmit data descriptor */
641 tx
->first_cmd_type_len
= cmd_type_len
;
642 tx
->first_olinfo_status
= le32_to_cpu(tx_desc
->read
.olinfo_status
);
645 } else if ((cmd_type_len
& E1000_ADVTXD_DTYP_CTXT
) ==
646 E1000_ADVTXD_DTYP_CTXT
) {
647 /* advanced transmit context descriptor */
648 tx_ctx_desc
= (struct e1000_adv_tx_context_desc
*)tx_desc
;
649 idx
= (le32_to_cpu(tx_ctx_desc
->mss_l4len_idx
) >> 4) & 1;
650 tx
->ctx
[idx
].vlan_macip_lens
= le32_to_cpu(tx_ctx_desc
->vlan_macip_lens
);
651 tx
->ctx
[idx
].seqnum_seed
= le32_to_cpu(tx_ctx_desc
->seqnum_seed
);
652 tx
->ctx
[idx
].type_tucmd_mlhl
= le32_to_cpu(tx_ctx_desc
->type_tucmd_mlhl
);
653 tx
->ctx
[idx
].mss_l4len_idx
= le32_to_cpu(tx_ctx_desc
->mss_l4len_idx
);
656 /* unknown descriptor type */
660 /* legacy descriptor */
662 /* TODO: Implement a support for legacy descriptors (7.2.2.1). */
665 buffer_addr
= le64_to_cpu(tx_desc
->read
.buffer_addr
);
666 length
= cmd_type_len
& 0xFFFF;
669 if (!net_tx_pkt_add_raw_fragment_pci(tx
->tx_pkt
, dev
,
670 buffer_addr
, length
)) {
675 if (cmd_type_len
& E1000_TXD_CMD_EOP
) {
676 if (!tx
->skip_cp
&& net_tx_pkt_parse(tx
->tx_pkt
)) {
677 idx
= (tx
->first_olinfo_status
>> 4) & 1;
678 igb_tx_insert_vlan(core
, queue_index
, tx
,
679 tx
->ctx
[idx
].vlan_macip_lens
>> IGB_TX_FLAGS_VLAN_SHIFT
,
680 !!(tx
->first_cmd_type_len
& E1000_TXD_CMD_VLE
));
682 if ((tx
->first_cmd_type_len
& E1000_ADVTXD_MAC_TSTAMP
) &&
683 (core
->mac
[TSYNCTXCTL
] & E1000_TSYNCTXCTL_ENABLED
) &&
684 !(core
->mac
[TSYNCTXCTL
] & E1000_TSYNCTXCTL_VALID
)) {
685 core
->mac
[TSYNCTXCTL
] |= E1000_TSYNCTXCTL_VALID
;
686 e1000x_timestamp(core
->mac
, core
->timadj
, TXSTMPL
, TXSTMPH
);
689 if (igb_tx_pkt_send(core
, tx
, queue_index
)) {
690 igb_on_tx_done_update_stats(core
, tx
->tx_pkt
, queue_index
);
696 net_tx_pkt_reset(tx
->tx_pkt
, net_tx_pkt_unmap_frag_pci
, dev
);
700 static uint32_t igb_tx_wb_eic(IGBCore
*core
, int queue_idx
)
704 n
= igb_ivar_entry_tx(queue_idx
);
705 ent
= (core
->mac
[IVAR0
+ n
/ 4] >> (8 * (n
% 4))) & 0xff;
707 return (ent
& E1000_IVAR_VALID
) ? BIT(ent
& 0x1f) : 0;
710 static uint32_t igb_rx_wb_eic(IGBCore
*core
, int queue_idx
)
714 n
= igb_ivar_entry_rx(queue_idx
);
715 ent
= (core
->mac
[IVAR0
+ n
/ 4] >> (8 * (n
% 4))) & 0xff;
717 return (ent
& E1000_IVAR_VALID
) ? BIT(ent
& 0x1f) : 0;
721 igb_ring_empty(IGBCore
*core
, const E1000ERingInfo
*r
)
723 return core
->mac
[r
->dh
] == core
->mac
[r
->dt
] ||
724 core
->mac
[r
->dt
] >= core
->mac
[r
->dlen
] / E1000_RING_DESC_LEN
;
727 static inline uint64_t
728 igb_ring_base(IGBCore
*core
, const E1000ERingInfo
*r
)
730 uint64_t bah
= core
->mac
[r
->dbah
];
731 uint64_t bal
= core
->mac
[r
->dbal
];
733 return (bah
<< 32) + bal
;
736 static inline uint64_t
737 igb_ring_head_descr(IGBCore
*core
, const E1000ERingInfo
*r
)
739 return igb_ring_base(core
, r
) + E1000_RING_DESC_LEN
* core
->mac
[r
->dh
];
743 igb_ring_advance(IGBCore
*core
, const E1000ERingInfo
*r
, uint32_t count
)
745 core
->mac
[r
->dh
] += count
;
747 if (core
->mac
[r
->dh
] * E1000_RING_DESC_LEN
>= core
->mac
[r
->dlen
]) {
748 core
->mac
[r
->dh
] = 0;
752 static inline uint32_t
753 igb_ring_free_descr_num(IGBCore
*core
, const E1000ERingInfo
*r
)
755 trace_e1000e_ring_free_space(r
->idx
, core
->mac
[r
->dlen
],
756 core
->mac
[r
->dh
], core
->mac
[r
->dt
]);
758 if (core
->mac
[r
->dh
] <= core
->mac
[r
->dt
]) {
759 return core
->mac
[r
->dt
] - core
->mac
[r
->dh
];
762 if (core
->mac
[r
->dh
] > core
->mac
[r
->dt
]) {
763 return core
->mac
[r
->dlen
] / E1000_RING_DESC_LEN
+
764 core
->mac
[r
->dt
] - core
->mac
[r
->dh
];
767 g_assert_not_reached();
772 igb_ring_enabled(IGBCore
*core
, const E1000ERingInfo
*r
)
774 return core
->mac
[r
->dlen
] > 0;
777 typedef struct IGB_TxRing_st
{
778 const E1000ERingInfo
*i
;
/* Map a per-queue register offset to its queue index; queue register
 * blocks are 16 words (0x40 bytes) apart. */
static inline int
igb_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    int delta = reg_idx - base_reg_idx;

    return delta / 16;
}
789 igb_tx_ring_init(IGBCore
*core
, IGB_TxRing
*txr
, int idx
)
791 static const E1000ERingInfo i
[IGB_NUM_QUEUES
] = {
792 { TDBAH0
, TDBAL0
, TDLEN0
, TDH0
, TDT0
, 0 },
793 { TDBAH1
, TDBAL1
, TDLEN1
, TDH1
, TDT1
, 1 },
794 { TDBAH2
, TDBAL2
, TDLEN2
, TDH2
, TDT2
, 2 },
795 { TDBAH3
, TDBAL3
, TDLEN3
, TDH3
, TDT3
, 3 },
796 { TDBAH4
, TDBAL4
, TDLEN4
, TDH4
, TDT4
, 4 },
797 { TDBAH5
, TDBAL5
, TDLEN5
, TDH5
, TDT5
, 5 },
798 { TDBAH6
, TDBAL6
, TDLEN6
, TDH6
, TDT6
, 6 },
799 { TDBAH7
, TDBAL7
, TDLEN7
, TDH7
, TDT7
, 7 },
800 { TDBAH8
, TDBAL8
, TDLEN8
, TDH8
, TDT8
, 8 },
801 { TDBAH9
, TDBAL9
, TDLEN9
, TDH9
, TDT9
, 9 },
802 { TDBAH10
, TDBAL10
, TDLEN10
, TDH10
, TDT10
, 10 },
803 { TDBAH11
, TDBAL11
, TDLEN11
, TDH11
, TDT11
, 11 },
804 { TDBAH12
, TDBAL12
, TDLEN12
, TDH12
, TDT12
, 12 },
805 { TDBAH13
, TDBAL13
, TDLEN13
, TDH13
, TDT13
, 13 },
806 { TDBAH14
, TDBAL14
, TDLEN14
, TDH14
, TDT14
, 14 },
807 { TDBAH15
, TDBAL15
, TDLEN15
, TDH15
, TDT15
, 15 }
810 assert(idx
< ARRAY_SIZE(i
));
813 txr
->tx
= &core
->tx
[idx
];
816 typedef struct E1000E_RxRing_st
{
817 const E1000ERingInfo
*i
;
821 igb_rx_ring_init(IGBCore
*core
, E1000E_RxRing
*rxr
, int idx
)
823 static const E1000ERingInfo i
[IGB_NUM_QUEUES
] = {
824 { RDBAH0
, RDBAL0
, RDLEN0
, RDH0
, RDT0
, 0 },
825 { RDBAH1
, RDBAL1
, RDLEN1
, RDH1
, RDT1
, 1 },
826 { RDBAH2
, RDBAL2
, RDLEN2
, RDH2
, RDT2
, 2 },
827 { RDBAH3
, RDBAL3
, RDLEN3
, RDH3
, RDT3
, 3 },
828 { RDBAH4
, RDBAL4
, RDLEN4
, RDH4
, RDT4
, 4 },
829 { RDBAH5
, RDBAL5
, RDLEN5
, RDH5
, RDT5
, 5 },
830 { RDBAH6
, RDBAL6
, RDLEN6
, RDH6
, RDT6
, 6 },
831 { RDBAH7
, RDBAL7
, RDLEN7
, RDH7
, RDT7
, 7 },
832 { RDBAH8
, RDBAL8
, RDLEN8
, RDH8
, RDT8
, 8 },
833 { RDBAH9
, RDBAL9
, RDLEN9
, RDH9
, RDT9
, 9 },
834 { RDBAH10
, RDBAL10
, RDLEN10
, RDH10
, RDT10
, 10 },
835 { RDBAH11
, RDBAL11
, RDLEN11
, RDH11
, RDT11
, 11 },
836 { RDBAH12
, RDBAL12
, RDLEN12
, RDH12
, RDT12
, 12 },
837 { RDBAH13
, RDBAL13
, RDLEN13
, RDH13
, RDT13
, 13 },
838 { RDBAH14
, RDBAL14
, RDLEN14
, RDH14
, RDT14
, 14 },
839 { RDBAH15
, RDBAL15
, RDLEN15
, RDH15
, RDT15
, 15 }
842 assert(idx
< ARRAY_SIZE(i
));
848 igb_txdesc_writeback(IGBCore
*core
, dma_addr_t base
,
849 union e1000_adv_tx_desc
*tx_desc
,
850 const E1000ERingInfo
*txi
)
853 uint32_t cmd_type_len
= le32_to_cpu(tx_desc
->read
.cmd_type_len
);
856 tdwba
= core
->mac
[E1000_TDWBAL(txi
->idx
) >> 2];
857 tdwba
|= (uint64_t)core
->mac
[E1000_TDWBAH(txi
->idx
) >> 2] << 32;
859 if (!(cmd_type_len
& E1000_TXD_CMD_RS
)) {
863 d
= pcie_sriov_get_vf_at_index(core
->owner
, txi
->idx
% 8);
869 uint32_t buffer
= cpu_to_le32(core
->mac
[txi
->dh
]);
870 pci_dma_write(d
, tdwba
& ~3, &buffer
, sizeof(buffer
));
872 uint32_t status
= le32_to_cpu(tx_desc
->wb
.status
) | E1000_TXD_STAT_DD
;
874 tx_desc
->wb
.status
= cpu_to_le32(status
);
875 pci_dma_write(d
, base
+ offsetof(union e1000_adv_tx_desc
, wb
),
876 &tx_desc
->wb
, sizeof(tx_desc
->wb
));
879 return igb_tx_wb_eic(core
, txi
->idx
);
883 igb_tx_enabled(IGBCore
*core
, const E1000ERingInfo
*txi
)
885 bool vmdq
= core
->mac
[MRQC
] & 1;
886 uint16_t qn
= txi
->idx
;
887 uint16_t pool
= qn
% IGB_NUM_VM_POOLS
;
889 return (core
->mac
[TCTL
] & E1000_TCTL_EN
) &&
890 (!vmdq
|| core
->mac
[VFTE
] & BIT(pool
)) &&
891 (core
->mac
[TXDCTL0
+ (qn
* 16)] & E1000_TXDCTL_QUEUE_ENABLE
);
895 igb_start_xmit(IGBCore
*core
, const IGB_TxRing
*txr
)
899 union e1000_adv_tx_desc desc
;
900 const E1000ERingInfo
*txi
= txr
->i
;
903 if (!igb_tx_enabled(core
, txi
)) {
904 trace_e1000e_tx_disabled();
908 d
= pcie_sriov_get_vf_at_index(core
->owner
, txi
->idx
% 8);
913 while (!igb_ring_empty(core
, txi
)) {
914 base
= igb_ring_head_descr(core
, txi
);
916 pci_dma_read(d
, base
, &desc
, sizeof(desc
));
918 trace_e1000e_tx_descr((void *)(intptr_t)desc
.read
.buffer_addr
,
919 desc
.read
.cmd_type_len
, desc
.wb
.status
);
921 igb_process_tx_desc(core
, d
, txr
->tx
, &desc
, txi
->idx
);
922 igb_ring_advance(core
, txi
, 1);
923 eic
|= igb_txdesc_writeback(core
, base
, &desc
, txi
);
927 igb_raise_interrupts(core
, EICR
, eic
);
928 igb_raise_interrupts(core
, ICR
, E1000_ICR_TXDW
);
931 net_tx_pkt_reset(txr
->tx
->tx_pkt
, net_tx_pkt_unmap_frag_pci
, d
);
935 igb_rxbufsize(IGBCore
*core
, const E1000ERingInfo
*r
)
937 uint32_t srrctl
= core
->mac
[E1000_SRRCTL(r
->idx
) >> 2];
938 uint32_t bsizepkt
= srrctl
& E1000_SRRCTL_BSIZEPKT_MASK
;
940 return bsizepkt
<< E1000_SRRCTL_BSIZEPKT_SHIFT
;
943 return e1000x_rxbufsize(core
->mac
[RCTL
]);
947 igb_has_rxbufs(IGBCore
*core
, const E1000ERingInfo
*r
, size_t total_size
)
949 uint32_t bufs
= igb_ring_free_descr_num(core
, r
);
950 uint32_t bufsize
= igb_rxbufsize(core
, r
);
952 trace_e1000e_rx_has_buffers(r
->idx
, bufs
, total_size
, bufsize
);
954 return total_size
<= bufs
/ (core
->rx_desc_len
/ E1000_MIN_RX_DESC_LEN
) *
959 igb_rxhdrbufsize(IGBCore
*core
, const E1000ERingInfo
*r
)
961 uint32_t srrctl
= core
->mac
[E1000_SRRCTL(r
->idx
) >> 2];
962 return (srrctl
& E1000_SRRCTL_BSIZEHDRSIZE_MASK
) >>
963 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT
;
967 igb_start_recv(IGBCore
*core
)
971 trace_e1000e_rx_start_recv();
973 for (i
= 0; i
<= core
->max_queue_num
; i
++) {
974 qemu_flush_queued_packets(qemu_get_subqueue(core
->owner_nic
, i
));
979 igb_can_receive(IGBCore
*core
)
983 if (!e1000x_rx_ready(core
->owner
, core
->mac
)) {
987 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
989 if (!(core
->mac
[RXDCTL0
+ (i
* 16)] & E1000_RXDCTL_QUEUE_ENABLE
)) {
993 igb_rx_ring_init(core
, &rxr
, i
);
994 if (igb_ring_enabled(core
, rxr
.i
) && igb_has_rxbufs(core
, rxr
.i
, 1)) {
995 trace_e1000e_rx_can_recv();
1000 trace_e1000e_rx_can_recv_rings_full();
1005 igb_receive(IGBCore
*core
, const uint8_t *buf
, size_t size
)
1007 const struct iovec iov
= {
1008 .iov_base
= (uint8_t *)buf
,
1012 return igb_receive_iov(core
, &iov
, 1);
1016 igb_rx_l3_cso_enabled(IGBCore
*core
)
1018 return !!(core
->mac
[RXCSUM
] & E1000_RXCSUM_IPOFLD
);
1022 igb_rx_l4_cso_enabled(IGBCore
*core
)
1024 return !!(core
->mac
[RXCSUM
] & E1000_RXCSUM_TUOFLD
);
1027 static bool igb_rx_is_oversized(IGBCore
*core
, const struct eth_header
*ehdr
,
1028 size_t size
, size_t vlan_num
,
1029 bool lpe
, uint16_t rlpml
)
1031 size_t vlan_header_size
= sizeof(struct vlan_header
) * vlan_num
;
1032 size_t header_size
= sizeof(struct eth_header
) + vlan_header_size
;
1033 return lpe
? size
+ ETH_FCS_LEN
> rlpml
: size
> header_size
+ ETH_MTU
;
1036 static uint16_t igb_receive_assign(IGBCore
*core
, const struct iovec
*iov
,
1037 size_t iovcnt
, size_t iov_ofs
,
1038 const L2Header
*l2_header
, size_t size
,
1039 E1000E_RSSInfo
*rss_info
,
1040 uint16_t *etqf
, bool *ts
, bool *external_tx
)
1042 static const int ta_shift
[] = { 4, 3, 2, 0 };
1043 const struct eth_header
*ehdr
= &l2_header
->eth
;
1044 uint32_t f
, ra
[2], *macp
, rctl
= core
->mac
[RCTL
];
1045 uint16_t queues
= 0;
1046 uint16_t oversized
= 0;
1047 size_t vlan_num
= 0;
1053 memset(rss_info
, 0, sizeof(E1000E_RSSInfo
));
1057 *external_tx
= true;
1060 if (core
->mac
[CTRL_EXT
] & BIT(26)) {
1061 if (be16_to_cpu(ehdr
->h_proto
) == core
->mac
[VET
] >> 16 &&
1062 be16_to_cpu(l2_header
->vlan
[0].h_proto
) == (core
->mac
[VET
] & 0xffff)) {
1066 if (be16_to_cpu(ehdr
->h_proto
) == (core
->mac
[VET
] & 0xffff)) {
1071 lpe
= !!(core
->mac
[RCTL
] & E1000_RCTL_LPE
);
1072 rlpml
= core
->mac
[RLPML
];
1073 if (!(core
->mac
[RCTL
] & E1000_RCTL_SBP
) &&
1074 igb_rx_is_oversized(core
, ehdr
, size
, vlan_num
, lpe
, rlpml
)) {
1075 trace_e1000x_rx_oversized(size
);
1079 for (*etqf
= 0; *etqf
< 8; (*etqf
)++) {
1080 if ((core
->mac
[ETQF0
+ *etqf
] & E1000_ETQF_FILTER_ENABLE
) &&
1081 be16_to_cpu(ehdr
->h_proto
) == (core
->mac
[ETQF0
+ *etqf
] & E1000_ETQF_ETYPE_MASK
)) {
1082 if ((core
->mac
[ETQF0
+ *etqf
] & E1000_ETQF_1588
) &&
1083 (core
->mac
[TSYNCRXCTL
] & E1000_TSYNCRXCTL_ENABLED
) &&
1084 !(core
->mac
[TSYNCRXCTL
] & E1000_TSYNCRXCTL_VALID
) &&
1085 iov_to_buf(iov
, iovcnt
, iov_ofs
+ ETH_HLEN
, &ptp2
, sizeof(ptp2
)) >= sizeof(ptp2
) &&
1086 (ptp2
.version_ptp
& 15) == 2 &&
1087 ptp2
.message_id_transport_specific
== ((core
->mac
[TSYNCRXCFG
] >> 8) & 255)) {
1088 e1000x_timestamp(core
->mac
, core
->timadj
, RXSTMPL
, RXSTMPH
);
1090 core
->mac
[TSYNCRXCTL
] |= E1000_TSYNCRXCTL_VALID
;
1091 core
->mac
[RXSATRL
] = le32_to_cpu(ptp2
.source_uuid_lo
);
1092 core
->mac
[RXSATRH
] = le16_to_cpu(ptp2
.source_uuid_hi
) |
1093 (le16_to_cpu(ptp2
.sequence_id
) << 16);
1100 !e1000x_rx_vlan_filter(core
->mac
, l2_header
->vlan
+ vlan_num
- 1)) {
1104 if (core
->mac
[MRQC
] & 1) {
1105 if (is_broadcast_ether_addr(ehdr
->h_dest
)) {
1106 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1107 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_BAM
) {
1112 for (macp
= core
->mac
+ RA
; macp
< core
->mac
+ RA
+ 32; macp
+= 2) {
1113 if (!(macp
[1] & E1000_RAH_AV
)) {
1116 ra
[0] = cpu_to_le32(macp
[0]);
1117 ra
[1] = cpu_to_le32(macp
[1]);
1118 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1119 queues
|= (macp
[1] & E1000_RAH_POOL_MASK
) / E1000_RAH_POOL_1
;
1123 for (macp
= core
->mac
+ RA2
; macp
< core
->mac
+ RA2
+ 16; macp
+= 2) {
1124 if (!(macp
[1] & E1000_RAH_AV
)) {
1127 ra
[0] = cpu_to_le32(macp
[0]);
1128 ra
[1] = cpu_to_le32(macp
[1]);
1129 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1130 queues
|= (macp
[1] & E1000_RAH_POOL_MASK
) / E1000_RAH_POOL_1
;
1135 macp
= core
->mac
+ (is_multicast_ether_addr(ehdr
->h_dest
) ? MTA
: UTA
);
1137 f
= ta_shift
[(rctl
>> E1000_RCTL_MO_SHIFT
) & 3];
1138 f
= (((ehdr
->h_dest
[5] << 8) | ehdr
->h_dest
[4]) >> f
) & 0xfff;
1139 if (macp
[f
>> 5] & (1 << (f
& 0x1f))) {
1140 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1141 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_ROMPE
) {
1146 } else if (is_unicast_ether_addr(ehdr
->h_dest
) && external_tx
) {
1147 *external_tx
= false;
1151 if (e1000x_vlan_rx_filter_enabled(core
->mac
)) {
1155 uint16_t vid
= be16_to_cpu(l2_header
->vlan
[vlan_num
- 1].h_tci
) & VLAN_VID_MASK
;
1157 for (i
= 0; i
< E1000_VLVF_ARRAY_SIZE
; i
++) {
1158 if ((core
->mac
[VLVF0
+ i
] & E1000_VLVF_VLANID_MASK
) == vid
&&
1159 (core
->mac
[VLVF0
+ i
] & E1000_VLVF_VLANID_ENABLE
)) {
1160 uint32_t poolsel
= core
->mac
[VLVF0
+ i
] & E1000_VLVF_POOLSEL_MASK
;
1161 mask
|= poolsel
>> E1000_VLVF_POOLSEL_SHIFT
;
1165 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1166 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_AUPE
) {
1175 if (is_unicast_ether_addr(ehdr
->h_dest
) && !queues
&& !external_tx
&&
1176 !(core
->mac
[VT_CTL
] & E1000_VT_CTL_DISABLE_DEF_POOL
)) {
1177 uint32_t def_pl
= core
->mac
[VT_CTL
] & E1000_VT_CTL_DEFAULT_POOL_MASK
;
1178 queues
= BIT(def_pl
>> E1000_VT_CTL_DEFAULT_POOL_SHIFT
);
1181 queues
&= core
->mac
[VFRE
];
1183 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1184 lpe
= !!(core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_LPE
);
1185 rlpml
= core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_RLPML_MASK
;
1186 if ((queues
& BIT(i
)) &&
1187 igb_rx_is_oversized(core
, ehdr
, size
, vlan_num
,
1189 oversized
|= BIT(i
);
1192 /* 8.19.37 increment ROC if packet is oversized for all queues */
1193 if (oversized
== queues
) {
1194 trace_e1000x_rx_oversized(size
);
1195 e1000x_inc_reg_if_not_full(core
->mac
, ROC
);
1197 queues
&= ~oversized
;
1201 igb_rss_parse_packet(core
, core
->rx_pkt
,
1202 external_tx
!= NULL
, rss_info
);
1203 /* Sec 8.26.1: PQn = VFn + VQn*8 */
1204 if (rss_info
->queue
& 1) {
1205 for (i
= 0; i
< IGB_NUM_VM_POOLS
; i
++) {
1206 if ((queues
& BIT(i
)) &&
1207 (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_RSSE
)) {
1208 queues
|= BIT(i
+ IGB_NUM_VM_POOLS
);
1215 bool accepted
= e1000x_rx_group_filter(core
->mac
, ehdr
);
1217 for (macp
= core
->mac
+ RA2
; macp
< core
->mac
+ RA2
+ 16; macp
+= 2) {
1218 if (!(macp
[1] & E1000_RAH_AV
)) {
1221 ra
[0] = cpu_to_le32(macp
[0]);
1222 ra
[1] = cpu_to_le32(macp
[1]);
1223 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1224 trace_e1000x_rx_flt_ucast_match((int)(macp
- core
->mac
- RA2
) / 2,
1225 MAC_ARG(ehdr
->h_dest
));
1234 igb_rss_parse_packet(core
, core
->rx_pkt
, false, rss_info
);
1235 queues
= BIT(rss_info
->queue
);
1243 igb_read_lgcy_rx_descr(IGBCore
*core
, struct e1000_rx_desc
*desc
,
1246 *buff_addr
= le64_to_cpu(desc
->buffer_addr
);
1250 igb_read_adv_rx_single_buf_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1253 *buff_addr
= le64_to_cpu(desc
->read
.pkt_addr
);
1257 igb_read_adv_rx_split_buf_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1260 buff_addr
[0] = le64_to_cpu(desc
->read
.hdr_addr
);
1261 buff_addr
[1] = le64_to_cpu(desc
->read
.pkt_addr
);
1264 typedef struct IGBBAState
{
1265 uint16_t written
[IGB_MAX_PS_BUFFERS
];
1269 typedef struct IGBSplitDescriptorData
{
1273 } IGBSplitDescriptorData
;
1275 typedef struct IGBPacketRxDMAState
{
1281 uint32_t rx_desc_packet_buf_size
;
1282 uint32_t rx_desc_header_buf_size
;
1288 hwaddr ba
[IGB_MAX_PS_BUFFERS
];
1289 IGBSplitDescriptorData ps_desc_data
;
1290 } IGBPacketRxDMAState
;
1293 igb_read_rx_descr(IGBCore
*core
,
1294 union e1000_rx_desc_union
*desc
,
1295 IGBPacketRxDMAState
*pdma_st
,
1296 const E1000ERingInfo
*r
)
1300 if (igb_rx_use_legacy_descriptor(core
)) {
1301 igb_read_lgcy_rx_descr(core
, &desc
->legacy
, &pdma_st
->ba
[1]);
1306 /* advanced header split descriptor */
1307 if (igb_rx_use_ps_descriptor(core
, r
)) {
1308 igb_read_adv_rx_split_buf_descr(core
, &desc
->adv
, &pdma_st
->ba
[0]);
1312 /* descriptor replication modes not supported */
1313 desc_type
= igb_rx_queue_desctyp_get(core
, r
);
1314 if (desc_type
!= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF
) {
1315 trace_igb_wrn_rx_desc_modes_not_supp(desc_type
);
1318 /* advanced single buffer descriptor */
1319 igb_read_adv_rx_single_buf_descr(core
, &desc
->adv
, &pdma_st
->ba
[1]);
1324 igb_verify_csum_in_sw(IGBCore
*core
,
1325 struct NetRxPkt
*pkt
,
1326 uint32_t *status_flags
,
1327 EthL4HdrProto l4hdr_proto
)
1330 uint32_t csum_error
;
1332 if (igb_rx_l3_cso_enabled(core
)) {
1333 if (!net_rx_pkt_validate_l3_csum(pkt
, &csum_valid
)) {
1334 trace_e1000e_rx_metadata_l3_csum_validation_failed();
1336 csum_error
= csum_valid
? 0 : E1000_RXDEXT_STATERR_IPE
;
1337 *status_flags
|= E1000_RXD_STAT_IPCS
| csum_error
;
1340 trace_e1000e_rx_metadata_l3_cso_disabled();
1343 if (!igb_rx_l4_cso_enabled(core
)) {
1344 trace_e1000e_rx_metadata_l4_cso_disabled();
1348 if (!net_rx_pkt_validate_l4_csum(pkt
, &csum_valid
)) {
1349 trace_e1000e_rx_metadata_l4_csum_validation_failed();
1353 csum_error
= csum_valid
? 0 : E1000_RXDEXT_STATERR_TCPE
;
1354 *status_flags
|= E1000_RXD_STAT_TCPCS
| csum_error
;
1356 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
) {
1357 *status_flags
|= E1000_RXD_STAT_UDPCS
;
1362 igb_build_rx_metadata_common(IGBCore
*core
,
1363 struct NetRxPkt
*pkt
,
1365 uint32_t *status_flags
,
1368 struct virtio_net_hdr
*vhdr
;
1369 bool hasip4
, hasip6
, csum_valid
;
1370 EthL4HdrProto l4hdr_proto
;
1372 *status_flags
= E1000_RXD_STAT_DD
;
1374 /* No additional metadata needed for non-EOP descriptors */
1379 *status_flags
|= E1000_RXD_STAT_EOP
;
1381 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1382 trace_e1000e_rx_metadata_protocols(hasip4
, hasip6
, l4hdr_proto
);
1385 if (net_rx_pkt_is_vlan_stripped(pkt
)) {
1386 *status_flags
|= E1000_RXD_STAT_VP
;
1387 *vlan_tag
= cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt
));
1388 trace_e1000e_rx_metadata_vlan(*vlan_tag
);
1391 /* RX CSO information */
1392 if (hasip6
&& (core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_XSUM_DIS
)) {
1393 trace_e1000e_rx_metadata_ipv6_sum_disabled();
1397 vhdr
= net_rx_pkt_get_vhdr(pkt
);
1399 if (!(vhdr
->flags
& VIRTIO_NET_HDR_F_DATA_VALID
) &&
1400 !(vhdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
)) {
1401 trace_e1000e_rx_metadata_virthdr_no_csum_info();
1402 igb_verify_csum_in_sw(core
, pkt
, status_flags
, l4hdr_proto
);
1406 if (igb_rx_l3_cso_enabled(core
)) {
1407 *status_flags
|= hasip4
? E1000_RXD_STAT_IPCS
: 0;
1409 trace_e1000e_rx_metadata_l3_cso_disabled();
1412 if (igb_rx_l4_cso_enabled(core
)) {
1413 switch (l4hdr_proto
) {
1414 case ETH_L4_HDR_PROTO_SCTP
:
1415 if (!net_rx_pkt_validate_l4_csum(pkt
, &csum_valid
)) {
1416 trace_e1000e_rx_metadata_l4_csum_validation_failed();
1420 *status_flags
|= E1000_RXDEXT_STATERR_TCPE
;
1423 case ETH_L4_HDR_PROTO_TCP
:
1424 *status_flags
|= E1000_RXD_STAT_TCPCS
;
1427 case ETH_L4_HDR_PROTO_UDP
:
1428 *status_flags
|= E1000_RXD_STAT_TCPCS
| E1000_RXD_STAT_UDPCS
;
1435 trace_e1000e_rx_metadata_l4_cso_disabled();
1439 trace_e1000e_rx_metadata_status_flags(*status_flags
);
1440 *status_flags
= cpu_to_le32(*status_flags
);
1444 igb_write_lgcy_rx_descr(IGBCore
*core
, struct e1000_rx_desc
*desc
,
1445 struct NetRxPkt
*pkt
,
1446 const E1000E_RSSInfo
*rss_info
,
1449 uint32_t status_flags
;
1451 assert(!rss_info
->enabled
);
1453 memset(desc
, 0, sizeof(*desc
));
1454 desc
->length
= cpu_to_le16(length
);
1455 igb_build_rx_metadata_common(core
, pkt
, pkt
!= NULL
,
1459 desc
->errors
= (uint8_t) (le32_to_cpu(status_flags
) >> 24);
1460 desc
->status
= (uint8_t) le32_to_cpu(status_flags
);
1464 igb_rx_ps_descriptor_split_always(IGBCore
*core
, const E1000ERingInfo
*r
)
1466 uint32_t desctyp
= igb_rx_queue_desctyp_get(core
, r
);
1467 return desctyp
== E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS
;
1471 igb_rx_desc_get_packet_type(IGBCore
*core
, struct NetRxPkt
*pkt
, uint16_t etqf
)
1474 bool hasip4
, hasip6
;
1475 EthL4HdrProto l4hdr_proto
;
1478 pkt_type
= BIT(11) | etqf
;
1482 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1484 if (hasip6
&& !(core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_DIS
)) {
1485 eth_ip6_hdr_info
*ip6hdr_info
= net_rx_pkt_get_ip6_info(pkt
);
1486 pkt_type
= ip6hdr_info
->has_ext_hdrs
? E1000_ADVRXD_PKT_IP6E
:
1487 E1000_ADVRXD_PKT_IP6
;
1488 } else if (hasip4
) {
1489 pkt_type
= E1000_ADVRXD_PKT_IP4
;
1494 switch (l4hdr_proto
) {
1495 case ETH_L4_HDR_PROTO_TCP
:
1496 pkt_type
|= E1000_ADVRXD_PKT_TCP
;
1498 case ETH_L4_HDR_PROTO_UDP
:
1499 pkt_type
|= E1000_ADVRXD_PKT_UDP
;
1501 case ETH_L4_HDR_PROTO_SCTP
:
1502 pkt_type
|= E1000_ADVRXD_PKT_SCTP
;
1512 igb_write_adv_rx_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1513 struct NetRxPkt
*pkt
,
1514 const E1000E_RSSInfo
*rss_info
, uint16_t etqf
, bool ts
,
1517 bool hasip4
, hasip6
;
1518 EthL4HdrProto l4hdr_proto
;
1519 uint16_t rss_type
= 0, pkt_type
;
1520 bool eop
= (pkt
!= NULL
);
1521 uint32_t adv_desc_status_error
= 0;
1522 memset(&desc
->wb
, 0, sizeof(desc
->wb
));
1524 desc
->wb
.upper
.length
= cpu_to_le16(length
);
1525 igb_build_rx_metadata_common(core
, pkt
, eop
,
1526 &desc
->wb
.upper
.status_error
,
1527 &desc
->wb
.upper
.vlan
);
1533 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1535 if ((core
->mac
[RXCSUM
] & E1000_RXCSUM_PCSD
) != 0) {
1536 if (rss_info
->enabled
) {
1537 desc
->wb
.lower
.hi_dword
.rss
= cpu_to_le32(rss_info
->hash
);
1538 rss_type
= rss_info
->type
;
1539 trace_igb_rx_metadata_rss(desc
->wb
.lower
.hi_dword
.rss
, rss_type
);
1541 } else if (hasip4
) {
1542 adv_desc_status_error
|= E1000_RXD_STAT_IPIDV
;
1543 desc
->wb
.lower
.hi_dword
.csum_ip
.ip_id
=
1544 cpu_to_le16(net_rx_pkt_get_ip_id(pkt
));
1545 trace_e1000e_rx_metadata_ip_id(
1546 desc
->wb
.lower
.hi_dword
.csum_ip
.ip_id
);
1550 adv_desc_status_error
|= BIT(16);
1553 pkt_type
= igb_rx_desc_get_packet_type(core
, pkt
, etqf
);
1554 trace_e1000e_rx_metadata_pkt_type(pkt_type
);
1555 desc
->wb
.lower
.lo_dword
.pkt_info
= cpu_to_le16(rss_type
| (pkt_type
<< 4));
1556 desc
->wb
.upper
.status_error
|= cpu_to_le32(adv_desc_status_error
);
1560 igb_write_adv_ps_rx_descr(IGBCore
*core
,
1561 union e1000_adv_rx_desc
*desc
,
1562 struct NetRxPkt
*pkt
,
1563 const E1000E_RSSInfo
*rss_info
,
1564 const E1000ERingInfo
*r
,
1567 IGBPacketRxDMAState
*pdma_st
)
1570 uint16_t hdr_info
= 0;
1572 if (pdma_st
->do_ps
) {
1573 pkt_len
= pdma_st
->bastate
.written
[1];
1575 pkt_len
= pdma_st
->bastate
.written
[0] + pdma_st
->bastate
.written
[1];
1578 igb_write_adv_rx_descr(core
, desc
, pkt
, rss_info
, etqf
, ts
, pkt_len
);
1580 hdr_info
= (pdma_st
->ps_desc_data
.hdr_len
<< E1000_ADVRXD_HDR_LEN_OFFSET
) &
1581 E1000_ADVRXD_ADV_HDR_LEN_MASK
;
1582 hdr_info
|= pdma_st
->ps_desc_data
.sph
? E1000_ADVRXD_HDR_SPH
: 0;
1583 desc
->wb
.lower
.lo_dword
.hdr_info
= cpu_to_le16(hdr_info
);
1585 desc
->wb
.upper
.status_error
|= cpu_to_le32(
1586 pdma_st
->ps_desc_data
.hbo
? E1000_ADVRXD_ST_ERR_HBO_OFFSET
: 0);
1590 igb_write_rx_descr(IGBCore
*core
,
1591 union e1000_rx_desc_union
*desc
,
1592 struct NetRxPkt
*pkt
,
1593 const E1000E_RSSInfo
*rss_info
,
1596 IGBPacketRxDMAState
*pdma_st
,
1597 const E1000ERingInfo
*r
)
1599 if (igb_rx_use_legacy_descriptor(core
)) {
1600 igb_write_lgcy_rx_descr(core
, &desc
->legacy
, pkt
, rss_info
,
1601 pdma_st
->bastate
.written
[1]);
1602 } else if (igb_rx_use_ps_descriptor(core
, r
)) {
1603 igb_write_adv_ps_rx_descr(core
, &desc
->adv
, pkt
, rss_info
, r
, etqf
, ts
,
1606 igb_write_adv_rx_descr(core
, &desc
->adv
, pkt
, rss_info
,
1607 etqf
, ts
, pdma_st
->bastate
.written
[1]);
1612 igb_pci_dma_write_rx_desc(IGBCore
*core
, PCIDevice
*dev
, dma_addr_t addr
,
1613 union e1000_rx_desc_union
*desc
, dma_addr_t len
)
1615 if (igb_rx_use_legacy_descriptor(core
)) {
1616 struct e1000_rx_desc
*d
= &desc
->legacy
;
1617 size_t offset
= offsetof(struct e1000_rx_desc
, status
);
1618 uint8_t status
= d
->status
;
1620 d
->status
&= ~E1000_RXD_STAT_DD
;
1621 pci_dma_write(dev
, addr
, desc
, len
);
1623 if (status
& E1000_RXD_STAT_DD
) {
1625 pci_dma_write(dev
, addr
+ offset
, &status
, sizeof(status
));
1628 union e1000_adv_rx_desc
*d
= &desc
->adv
;
1630 offsetof(union e1000_adv_rx_desc
, wb
.upper
.status_error
);
1631 uint32_t status
= d
->wb
.upper
.status_error
;
1633 d
->wb
.upper
.status_error
&= ~E1000_RXD_STAT_DD
;
1634 pci_dma_write(dev
, addr
, desc
, len
);
1636 if (status
& E1000_RXD_STAT_DD
) {
1637 d
->wb
.upper
.status_error
= status
;
1638 pci_dma_write(dev
, addr
+ offset
, &status
, sizeof(status
));
1644 igb_update_rx_stats(IGBCore
*core
, const E1000ERingInfo
*rxi
,
1645 size_t pkt_size
, size_t pkt_fcs_size
)
1647 eth_pkt_types_e pkt_type
= net_rx_pkt_get_packet_type(core
->rx_pkt
);
1648 e1000x_update_rx_total_stats(core
->mac
, pkt_type
, pkt_size
, pkt_fcs_size
);
1650 if (core
->mac
[MRQC
] & 1) {
1651 uint16_t pool
= rxi
->idx
% IGB_NUM_VM_POOLS
;
1653 core
->mac
[PVFGORC0
+ (pool
* 64)] += pkt_size
+ 4;
1654 core
->mac
[PVFGPRC0
+ (pool
* 64)]++;
1655 if (pkt_type
== ETH_PKT_MCAST
) {
1656 core
->mac
[PVFMPRC0
+ (pool
* 64)]++;
1662 igb_rx_descr_threshold_hit(IGBCore
*core
, const E1000ERingInfo
*rxi
)
1664 return igb_ring_free_descr_num(core
, rxi
) ==
1665 ((core
->mac
[E1000_SRRCTL(rxi
->idx
) >> 2] >> 20) & 31) * 16;
1669 igb_do_ps(IGBCore
*core
,
1670 const E1000ERingInfo
*r
,
1671 struct NetRxPkt
*pkt
,
1672 IGBPacketRxDMAState
*pdma_st
)
1674 bool hasip4
, hasip6
;
1675 EthL4HdrProto l4hdr_proto
;
1678 size_t bheader_size
;
1679 size_t total_pkt_len
;
1681 if (!igb_rx_use_ps_descriptor(core
, r
)) {
1685 total_pkt_len
= net_rx_pkt_get_total_len(pkt
);
1686 bheader_size
= igb_rxhdrbufsize(core
, r
);
1687 split_always
= igb_rx_ps_descriptor_split_always(core
, r
);
1688 if (split_always
&& total_pkt_len
<= bheader_size
) {
1689 pdma_st
->ps_hdr_len
= total_pkt_len
;
1690 pdma_st
->ps_desc_data
.hdr_len
= total_pkt_len
;
1694 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1697 fragment
= net_rx_pkt_get_ip4_info(pkt
)->fragment
;
1698 } else if (hasip6
) {
1699 fragment
= net_rx_pkt_get_ip6_info(pkt
)->fragment
;
1701 pdma_st
->ps_desc_data
.hdr_len
= bheader_size
;
1702 goto header_not_handled
;
1705 if (fragment
&& (core
->mac
[RFCTL
] & E1000_RFCTL_IPFRSP_DIS
)) {
1706 pdma_st
->ps_desc_data
.hdr_len
= bheader_size
;
1707 goto header_not_handled
;
1710 /* no header splitting for SCTP */
1711 if (!fragment
&& (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
||
1712 l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
)) {
1713 pdma_st
->ps_hdr_len
= net_rx_pkt_get_l5_hdr_offset(pkt
);
1715 pdma_st
->ps_hdr_len
= net_rx_pkt_get_l4_hdr_offset(pkt
);
1718 pdma_st
->ps_desc_data
.sph
= true;
1719 pdma_st
->ps_desc_data
.hdr_len
= pdma_st
->ps_hdr_len
;
1721 if (pdma_st
->ps_hdr_len
> bheader_size
) {
1722 pdma_st
->ps_desc_data
.hbo
= true;
1723 goto header_not_handled
;
1730 pdma_st
->ps_hdr_len
= bheader_size
;
1738 igb_truncate_to_descriptor_size(IGBPacketRxDMAState
*pdma_st
, size_t *size
)
1740 if (pdma_st
->do_ps
&& pdma_st
->is_first
) {
1741 if (*size
> pdma_st
->rx_desc_packet_buf_size
+ pdma_st
->ps_hdr_len
) {
1742 *size
= pdma_st
->rx_desc_packet_buf_size
+ pdma_st
->ps_hdr_len
;
1745 if (*size
> pdma_st
->rx_desc_packet_buf_size
) {
1746 *size
= pdma_st
->rx_desc_packet_buf_size
;
1752 igb_write_hdr_frag_to_rx_buffers(IGBCore
*core
,
1754 IGBPacketRxDMAState
*pdma_st
,
1756 dma_addr_t data_len
)
1758 assert(data_len
<= pdma_st
->rx_desc_header_buf_size
-
1759 pdma_st
->bastate
.written
[0]);
1761 pdma_st
->ba
[0] + pdma_st
->bastate
.written
[0],
1763 pdma_st
->bastate
.written
[0] += data_len
;
1764 pdma_st
->bastate
.cur_idx
= 1;
1768 igb_write_header_to_rx_buffers(IGBCore
*core
,
1769 struct NetRxPkt
*pkt
,
1771 IGBPacketRxDMAState
*pdma_st
,
1775 size_t ps_hdr_copied
= 0;
1777 if (!pdma_st
->is_first
) {
1778 /* Leave buffer 0 of each descriptor except first */
1780 pdma_st
->bastate
.cur_idx
= 1;
1785 iov_copy
= MIN(pdma_st
->ps_hdr_len
- ps_hdr_copied
,
1786 pdma_st
->iov
->iov_len
- pdma_st
->iov_ofs
);
1788 igb_write_hdr_frag_to_rx_buffers(core
, d
, pdma_st
,
1789 pdma_st
->iov
->iov_base
,
1792 *copy_size
-= iov_copy
;
1793 ps_hdr_copied
+= iov_copy
;
1795 pdma_st
->iov_ofs
+= iov_copy
;
1796 if (pdma_st
->iov_ofs
== pdma_st
->iov
->iov_len
) {
1798 pdma_st
->iov_ofs
= 0;
1800 } while (ps_hdr_copied
< pdma_st
->ps_hdr_len
);
1802 pdma_st
->is_first
= false;
1806 igb_write_payload_frag_to_rx_buffers(IGBCore
*core
,
1808 IGBPacketRxDMAState
*pdma_st
,
1810 dma_addr_t data_len
)
1812 while (data_len
> 0) {
1813 assert(pdma_st
->bastate
.cur_idx
< IGB_MAX_PS_BUFFERS
);
1815 uint32_t cur_buf_bytes_left
=
1816 pdma_st
->rx_desc_packet_buf_size
-
1817 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
];
1818 uint32_t bytes_to_write
= MIN(data_len
, cur_buf_bytes_left
);
1820 trace_igb_rx_desc_buff_write(
1821 pdma_st
->bastate
.cur_idx
,
1822 pdma_st
->ba
[pdma_st
->bastate
.cur_idx
],
1823 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
],
1828 pdma_st
->ba
[pdma_st
->bastate
.cur_idx
] +
1829 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
],
1830 data
, bytes_to_write
);
1832 pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
] += bytes_to_write
;
1833 data
+= bytes_to_write
;
1834 data_len
-= bytes_to_write
;
1836 if (pdma_st
->bastate
.written
[pdma_st
->bastate
.cur_idx
] ==
1837 pdma_st
->rx_desc_packet_buf_size
) {
1838 pdma_st
->bastate
.cur_idx
++;
1844 igb_write_payload_to_rx_buffers(IGBCore
*core
,
1845 struct NetRxPkt
*pkt
,
1847 IGBPacketRxDMAState
*pdma_st
,
1850 static const uint32_t fcs_pad
;
1853 /* Copy packet payload */
1854 while (*copy_size
) {
1855 iov_copy
= MIN(*copy_size
, pdma_st
->iov
->iov_len
- pdma_st
->iov_ofs
);
1856 igb_write_payload_frag_to_rx_buffers(core
, d
,
1858 pdma_st
->iov
->iov_base
+
1862 *copy_size
-= iov_copy
;
1863 pdma_st
->iov_ofs
+= iov_copy
;
1864 if (pdma_st
->iov_ofs
== pdma_st
->iov
->iov_len
) {
1866 pdma_st
->iov_ofs
= 0;
1870 if (pdma_st
->desc_offset
+ pdma_st
->desc_size
>= pdma_st
->total_size
) {
1871 /* Simulate FCS checksum presence in the last descriptor */
1872 igb_write_payload_frag_to_rx_buffers(core
, d
,
1874 (const char *) &fcs_pad
,
1875 e1000x_fcs_len(core
->mac
));
1880 igb_write_to_rx_buffers(IGBCore
*core
,
1881 struct NetRxPkt
*pkt
,
1883 IGBPacketRxDMAState
*pdma_st
)
1887 if (!(pdma_st
->ba
)[1] || (pdma_st
->do_ps
&& !(pdma_st
->ba
[0]))) {
1888 /* as per intel docs; skip descriptors with null buf addr */
1889 trace_e1000e_rx_null_descriptor();
1893 if (pdma_st
->desc_offset
>= pdma_st
->size
) {
1897 pdma_st
->desc_size
= pdma_st
->total_size
- pdma_st
->desc_offset
;
1898 igb_truncate_to_descriptor_size(pdma_st
, &pdma_st
->desc_size
);
1899 copy_size
= pdma_st
->size
- pdma_st
->desc_offset
;
1900 igb_truncate_to_descriptor_size(pdma_st
, ©_size
);
1902 /* For PS mode copy the packet header first */
1903 if (pdma_st
->do_ps
) {
1904 igb_write_header_to_rx_buffers(core
, pkt
, d
, pdma_st
, ©_size
);
1906 pdma_st
->bastate
.cur_idx
= 1;
1909 igb_write_payload_to_rx_buffers(core
, pkt
, d
, pdma_st
, ©_size
);
1913 igb_write_packet_to_guest(IGBCore
*core
, struct NetRxPkt
*pkt
,
1914 const E1000E_RxRing
*rxr
,
1915 const E1000E_RSSInfo
*rss_info
,
1916 uint16_t etqf
, bool ts
)
1920 union e1000_rx_desc_union desc
;
1921 const E1000ERingInfo
*rxi
;
1924 IGBPacketRxDMAState pdma_st
= {0};
1925 pdma_st
.is_first
= true;
1926 pdma_st
.size
= net_rx_pkt_get_total_len(pkt
);
1927 pdma_st
.total_size
= pdma_st
.size
+ e1000x_fcs_len(core
->mac
);
1930 rx_desc_len
= core
->rx_desc_len
;
1931 pdma_st
.rx_desc_packet_buf_size
= igb_rxbufsize(core
, rxi
);
1932 pdma_st
.rx_desc_header_buf_size
= igb_rxhdrbufsize(core
, rxi
);
1933 pdma_st
.iov
= net_rx_pkt_get_iovec(pkt
);
1934 d
= pcie_sriov_get_vf_at_index(core
->owner
, rxi
->idx
% 8);
1939 pdma_st
.do_ps
= igb_do_ps(core
, rxi
, pkt
, &pdma_st
);
1942 memset(&pdma_st
.bastate
, 0, sizeof(IGBBAState
));
1943 bool is_last
= false;
1945 if (igb_ring_empty(core
, rxi
)) {
1949 base
= igb_ring_head_descr(core
, rxi
);
1950 pci_dma_read(d
, base
, &desc
, rx_desc_len
);
1951 trace_e1000e_rx_descr(rxi
->idx
, base
, rx_desc_len
);
1953 igb_read_rx_descr(core
, &desc
, &pdma_st
, rxi
);
1955 igb_write_to_rx_buffers(core
, pkt
, d
, &pdma_st
);
1956 pdma_st
.desc_offset
+= pdma_st
.desc_size
;
1957 if (pdma_st
.desc_offset
>= pdma_st
.total_size
) {
1961 igb_write_rx_descr(core
, &desc
,
1962 is_last
? pkt
: NULL
,
1967 igb_pci_dma_write_rx_desc(core
, d
, base
, &desc
, rx_desc_len
);
1968 igb_ring_advance(core
, rxi
, rx_desc_len
/ E1000_MIN_RX_DESC_LEN
);
1969 } while (pdma_st
.desc_offset
< pdma_st
.total_size
);
1971 igb_update_rx_stats(core
, rxi
, pdma_st
.size
, pdma_st
.total_size
);
1975 igb_rx_strip_vlan(IGBCore
*core
, const E1000ERingInfo
*rxi
)
1977 if (core
->mac
[MRQC
] & 1) {
1978 uint16_t pool
= rxi
->idx
% IGB_NUM_VM_POOLS
;
1979 /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
1980 return (net_rx_pkt_get_packet_type(core
->rx_pkt
) == ETH_PKT_MCAST
) ?
1981 core
->mac
[RPLOLR
] & E1000_RPLOLR_STRVLAN
:
1982 core
->mac
[VMOLR0
+ pool
] & E1000_VMOLR_STRVLAN
;
1985 return e1000x_vlan_enabled(core
->mac
);
1989 igb_rx_fix_l4_csum(IGBCore
*core
, struct NetRxPkt
*pkt
)
1991 struct virtio_net_hdr
*vhdr
= net_rx_pkt_get_vhdr(pkt
);
1993 if (vhdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) {
1994 net_rx_pkt_fix_l4_csum(pkt
);
1999 igb_receive_iov(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
)
2001 return igb_receive_internal(core
, iov
, iovcnt
, core
->has_vnet
, NULL
);
2005 igb_receive_internal(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
,
2006 bool has_vnet
, bool *external_tx
)
2008 uint16_t queues
= 0;
2009 uint32_t causes
= 0;
2010 uint32_t ecauses
= 0;
2013 uint8_t octets
[ETH_ZLEN
];
2015 struct iovec min_iov
;
2016 size_t size
, orig_size
;
2019 E1000E_RSSInfo rss_info
;
2023 int strip_vlan_index
;
2026 trace_e1000e_rx_receive_iov(iovcnt
);
2029 *external_tx
= true;
2032 if (!e1000x_hw_rx_enabled(core
->mac
)) {
2036 /* Pull virtio header in */
2038 net_rx_pkt_set_vhdr_iovec(core
->rx_pkt
, iov
, iovcnt
);
2039 iov_ofs
= sizeof(struct virtio_net_hdr
);
2041 net_rx_pkt_unset_vhdr(core
->rx_pkt
);
2044 orig_size
= iov_size(iov
, iovcnt
);
2045 size
= orig_size
- iov_ofs
;
2047 /* Pad to minimum Ethernet frame length */
2048 if (size
< sizeof(buf
)) {
2049 iov_to_buf(iov
, iovcnt
, iov_ofs
, &buf
, size
);
2050 memset(&buf
.octets
[size
], 0, sizeof(buf
) - size
);
2051 e1000x_inc_reg_if_not_full(core
->mac
, RUC
);
2052 min_iov
.iov_base
= &buf
;
2053 min_iov
.iov_len
= size
= sizeof(buf
);
2058 iov_to_buf(iov
, iovcnt
, iov_ofs
, &buf
, sizeof(buf
.l2_header
));
2061 net_rx_pkt_set_packet_type(core
->rx_pkt
,
2062 get_eth_packet_type(&buf
.l2_header
.eth
));
2063 net_rx_pkt_set_protocols(core
->rx_pkt
, iov
, iovcnt
, iov_ofs
);
2065 queues
= igb_receive_assign(core
, iov
, iovcnt
, iov_ofs
,
2066 &buf
.l2_header
, size
,
2067 &rss_info
, &etqf
, &ts
, external_tx
);
2069 trace_e1000e_rx_flt_dropped();
2073 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
2074 if (!(queues
& BIT(i
)) ||
2075 !(core
->mac
[RXDCTL0
+ (i
* 16)] & E1000_RXDCTL_QUEUE_ENABLE
)) {
2079 igb_rx_ring_init(core
, &rxr
, i
);
2081 if (!igb_rx_strip_vlan(core
, rxr
.i
)) {
2082 strip_vlan_index
= -1;
2083 } else if (core
->mac
[CTRL_EXT
] & BIT(26)) {
2084 strip_vlan_index
= 1;
2086 strip_vlan_index
= 0;
2089 net_rx_pkt_attach_iovec_ex(core
->rx_pkt
, iov
, iovcnt
, iov_ofs
,
2091 core
->mac
[VET
] & 0xffff,
2092 core
->mac
[VET
] >> 16);
2094 total_size
= net_rx_pkt_get_total_len(core
->rx_pkt
) +
2095 e1000x_fcs_len(core
->mac
);
2097 if (!igb_has_rxbufs(core
, rxr
.i
, total_size
)) {
2098 causes
|= E1000_ICS_RXO
;
2099 trace_e1000e_rx_not_written_to_guest(rxr
.i
->idx
);
2103 causes
|= E1000_ICR_RXDW
;
2105 igb_rx_fix_l4_csum(core
, core
->rx_pkt
);
2106 igb_write_packet_to_guest(core
, core
->rx_pkt
, &rxr
, &rss_info
, etqf
, ts
);
2108 /* Check if receive descriptor minimum threshold hit */
2109 if (igb_rx_descr_threshold_hit(core
, rxr
.i
)) {
2110 causes
|= E1000_ICS_RXDMT0
;
2113 ecauses
|= igb_rx_wb_eic(core
, rxr
.i
->idx
);
2115 trace_e1000e_rx_written_to_guest(rxr
.i
->idx
);
2118 trace_e1000e_rx_interrupt_set(causes
);
2119 igb_raise_interrupts(core
, EICR
, ecauses
);
2120 igb_raise_interrupts(core
, ICR
, causes
);
2126 igb_have_autoneg(IGBCore
*core
)
2128 return core
->phy
[MII_BMCR
] & MII_BMCR_AUTOEN
;
2131 static void igb_update_flowctl_status(IGBCore
*core
)
2133 if (igb_have_autoneg(core
) && core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
) {
2134 trace_e1000e_link_autoneg_flowctl(true);
2135 core
->mac
[CTRL
] |= E1000_CTRL_TFCE
| E1000_CTRL_RFCE
;
2137 trace_e1000e_link_autoneg_flowctl(false);
2142 igb_link_down(IGBCore
*core
)
2144 e1000x_update_regs_on_link_down(core
->mac
, core
->phy
);
2145 igb_update_flowctl_status(core
);
2149 igb_set_phy_ctrl(IGBCore
*core
, uint16_t val
)
2151 /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
2152 core
->phy
[MII_BMCR
] = val
& ~(0x3f | MII_BMCR_RESET
| MII_BMCR_ANRESTART
);
2154 if ((val
& MII_BMCR_ANRESTART
) && igb_have_autoneg(core
)) {
2155 e1000x_restart_autoneg(core
->mac
, core
->phy
, core
->autoneg_timer
);
2159 void igb_core_set_link_status(IGBCore
*core
)
2161 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
2162 uint32_t old_status
= core
->mac
[STATUS
];
2164 trace_e1000e_link_status_changed(nc
->link_down
? false : true);
2166 if (nc
->link_down
) {
2167 e1000x_update_regs_on_link_down(core
->mac
, core
->phy
);
2169 if (igb_have_autoneg(core
) &&
2170 !(core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
)) {
2171 e1000x_restart_autoneg(core
->mac
, core
->phy
,
2172 core
->autoneg_timer
);
2174 e1000x_update_regs_on_link_up(core
->mac
, core
->phy
);
2175 igb_start_recv(core
);
2179 if (core
->mac
[STATUS
] != old_status
) {
2180 igb_raise_interrupts(core
, ICR
, E1000_ICR_LSC
);
2185 igb_set_ctrl(IGBCore
*core
, int index
, uint32_t val
)
2187 trace_e1000e_core_ctrl_write(index
, val
);
2189 /* RST is self clearing */
2190 core
->mac
[CTRL
] = val
& ~E1000_CTRL_RST
;
2191 core
->mac
[CTRL_DUP
] = core
->mac
[CTRL
];
2193 trace_e1000e_link_set_params(
2194 !!(val
& E1000_CTRL_ASDE
),
2195 (val
& E1000_CTRL_SPD_SEL
) >> E1000_CTRL_SPD_SHIFT
,
2196 !!(val
& E1000_CTRL_FRCSPD
),
2197 !!(val
& E1000_CTRL_FRCDPX
),
2198 !!(val
& E1000_CTRL_RFCE
),
2199 !!(val
& E1000_CTRL_TFCE
));
2201 if (val
& E1000_CTRL_RST
) {
2202 trace_e1000e_core_ctrl_sw_reset();
2203 igb_reset(core
, true);
2206 if (val
& E1000_CTRL_PHY_RST
) {
2207 trace_e1000e_core_ctrl_phy_reset();
2208 core
->mac
[STATUS
] |= E1000_STATUS_PHYRA
;
2213 igb_set_rfctl(IGBCore
*core
, int index
, uint32_t val
)
2215 trace_e1000e_rx_set_rfctl(val
);
2217 if (!(val
& E1000_RFCTL_ISCSI_DIS
)) {
2218 trace_e1000e_wrn_iscsi_filtering_not_supported();
2221 if (!(val
& E1000_RFCTL_NFSW_DIS
)) {
2222 trace_e1000e_wrn_nfsw_filtering_not_supported();
2225 if (!(val
& E1000_RFCTL_NFSR_DIS
)) {
2226 trace_e1000e_wrn_nfsr_filtering_not_supported();
2229 core
->mac
[RFCTL
] = val
;
2233 igb_calc_rxdesclen(IGBCore
*core
)
2235 if (igb_rx_use_legacy_descriptor(core
)) {
2236 core
->rx_desc_len
= sizeof(struct e1000_rx_desc
);
2238 core
->rx_desc_len
= sizeof(union e1000_adv_rx_desc
);
2240 trace_e1000e_rx_desc_len(core
->rx_desc_len
);
2244 igb_set_rx_control(IGBCore
*core
, int index
, uint32_t val
)
2246 core
->mac
[RCTL
] = val
;
2247 trace_e1000e_rx_set_rctl(core
->mac
[RCTL
]);
2249 if (val
& E1000_RCTL_DTYP_MASK
) {
2250 qemu_log_mask(LOG_GUEST_ERROR
,
2251 "igb: RCTL.DTYP must be zero for compatibility");
2254 if (val
& E1000_RCTL_EN
) {
2255 igb_calc_rxdesclen(core
);
2256 igb_start_recv(core
);
2261 igb_postpone_interrupt(IGBIntrDelayTimer
*timer
)
2263 if (timer
->running
) {
2264 trace_e1000e_irq_postponed_by_xitr(timer
->delay_reg
<< 2);
2269 if (timer
->core
->mac
[timer
->delay_reg
] != 0) {
2270 igb_intrmgr_rearm_timer(timer
);
2277 igb_eitr_should_postpone(IGBCore
*core
, int idx
)
2279 return igb_postpone_interrupt(&core
->eitr
[idx
]);
2282 static void igb_send_msix(IGBCore
*core
, uint32_t causes
)
2286 for (vector
= 0; vector
< IGB_INTR_NUM
; ++vector
) {
2287 if ((causes
& BIT(vector
)) && !igb_eitr_should_postpone(core
, vector
)) {
2289 trace_e1000e_irq_msix_notify_vec(vector
);
2290 igb_msix_notify(core
, vector
);
2296 igb_fix_icr_asserted(IGBCore
*core
)
2298 core
->mac
[ICR
] &= ~E1000_ICR_ASSERTED
;
2299 if (core
->mac
[ICR
]) {
2300 core
->mac
[ICR
] |= E1000_ICR_ASSERTED
;
2303 trace_e1000e_irq_fix_icr_asserted(core
->mac
[ICR
]);
2306 static void igb_raise_interrupts(IGBCore
*core
, size_t index
, uint32_t causes
)
2308 uint32_t old_causes
= core
->mac
[ICR
] & core
->mac
[IMS
];
2309 uint32_t old_ecauses
= core
->mac
[EICR
] & core
->mac
[EIMS
];
2310 uint32_t raised_causes
;
2311 uint32_t raised_ecauses
;
2314 trace_e1000e_irq_set(index
<< 2,
2315 core
->mac
[index
], core
->mac
[index
] | causes
);
2317 core
->mac
[index
] |= causes
;
2319 if (core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
) {
2320 raised_causes
= core
->mac
[ICR
] & core
->mac
[IMS
] & ~old_causes
;
2322 if (raised_causes
& E1000_ICR_DRSTA
) {
2323 int_alloc
= core
->mac
[IVAR_MISC
] & 0xff;
2324 if (int_alloc
& E1000_IVAR_VALID
) {
2325 core
->mac
[EICR
] |= BIT(int_alloc
& 0x1f);
2328 /* Check if other bits (excluding the TCP Timer) are enabled. */
2329 if (raised_causes
& ~E1000_ICR_DRSTA
) {
2330 int_alloc
= (core
->mac
[IVAR_MISC
] >> 8) & 0xff;
2331 if (int_alloc
& E1000_IVAR_VALID
) {
2332 core
->mac
[EICR
] |= BIT(int_alloc
& 0x1f);
2336 raised_ecauses
= core
->mac
[EICR
] & core
->mac
[EIMS
] & ~old_ecauses
;
2337 if (!raised_ecauses
) {
2341 igb_send_msix(core
, raised_ecauses
);
2343 igb_fix_icr_asserted(core
);
2345 raised_causes
= core
->mac
[ICR
] & core
->mac
[IMS
] & ~old_causes
;
2346 if (!raised_causes
) {
2350 core
->mac
[EICR
] |= (raised_causes
& E1000_ICR_DRSTA
) | E1000_EICR_OTHER
;
2352 if (msix_enabled(core
->owner
)) {
2353 trace_e1000e_irq_msix_notify_vec(0);
2354 msix_notify(core
->owner
, 0);
2355 } else if (msi_enabled(core
->owner
)) {
2356 trace_e1000e_irq_msi_notify(raised_causes
);
2357 msi_notify(core
->owner
, 0);
2359 igb_raise_legacy_irq(core
);
2364 static void igb_lower_interrupts(IGBCore
*core
, size_t index
, uint32_t causes
)
2366 trace_e1000e_irq_clear(index
<< 2,
2367 core
->mac
[index
], core
->mac
[index
] & ~causes
);
2369 core
->mac
[index
] &= ~causes
;
2371 trace_e1000e_irq_pending_interrupts(core
->mac
[ICR
] & core
->mac
[IMS
],
2372 core
->mac
[ICR
], core
->mac
[IMS
]);
2374 if (!(core
->mac
[ICR
] & core
->mac
[IMS
]) &&
2375 !(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
)) {
2376 core
->mac
[EICR
] &= ~E1000_EICR_OTHER
;
2378 if (!msix_enabled(core
->owner
) && !msi_enabled(core
->owner
)) {
2379 igb_lower_legacy_irq(core
);
2384 static void igb_set_eics(IGBCore
*core
, int index
, uint32_t val
)
2386 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2387 uint32_t mask
= msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
;
2389 trace_igb_irq_write_eics(val
, msix
);
2390 igb_raise_interrupts(core
, EICR
, val
& mask
);
2393 static void igb_set_eims(IGBCore
*core
, int index
, uint32_t val
)
2395 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2396 uint32_t mask
= msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
;
2398 trace_igb_irq_write_eims(val
, msix
);
2399 igb_raise_interrupts(core
, EIMS
, val
& mask
);
2402 static void mailbox_interrupt_to_vf(IGBCore
*core
, uint16_t vfn
)
2404 uint32_t ent
= core
->mac
[VTIVAR_MISC
+ vfn
];
2407 if ((ent
& E1000_IVAR_VALID
)) {
2408 causes
= (ent
& 0x3) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
);
2409 igb_raise_interrupts(core
, EICR
, causes
);
2413 static void mailbox_interrupt_to_pf(IGBCore
*core
)
2415 igb_raise_interrupts(core
, ICR
, E1000_ICR_VMMB
);
2418 static void igb_set_pfmailbox(IGBCore
*core
, int index
, uint32_t val
)
2420 uint16_t vfn
= index
- P2VMAILBOX0
;
2422 trace_igb_set_pfmailbox(vfn
, val
);
2424 if (val
& E1000_P2VMAILBOX_STS
) {
2425 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_PFSTS
;
2426 mailbox_interrupt_to_vf(core
, vfn
);
2429 if (val
& E1000_P2VMAILBOX_ACK
) {
2430 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_PFACK
;
2431 mailbox_interrupt_to_vf(core
, vfn
);
2434 /* Buffer Taken by PF (can be set only if the VFU is cleared). */
2435 if (val
& E1000_P2VMAILBOX_PFU
) {
2436 if (!(core
->mac
[index
] & E1000_P2VMAILBOX_VFU
)) {
2437 core
->mac
[index
] |= E1000_P2VMAILBOX_PFU
;
2438 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_PFU
;
2441 core
->mac
[index
] &= ~E1000_P2VMAILBOX_PFU
;
2442 core
->mac
[V2PMAILBOX0
+ vfn
] &= ~E1000_V2PMAILBOX_PFU
;
2445 if (val
& E1000_P2VMAILBOX_RVFU
) {
2446 core
->mac
[V2PMAILBOX0
+ vfn
] &= ~E1000_V2PMAILBOX_VFU
;
2447 core
->mac
[MBVFICR
] &= ~((E1000_MBVFICR_VFACK_VF1
<< vfn
) |
2448 (E1000_MBVFICR_VFREQ_VF1
<< vfn
));
2452 static void igb_set_vfmailbox(IGBCore
*core
, int index
, uint32_t val
)
2454 uint16_t vfn
= index
- V2PMAILBOX0
;
2456 trace_igb_set_vfmailbox(vfn
, val
);
2458 if (val
& E1000_V2PMAILBOX_REQ
) {
2459 core
->mac
[MBVFICR
] |= E1000_MBVFICR_VFREQ_VF1
<< vfn
;
2460 mailbox_interrupt_to_pf(core
);
2463 if (val
& E1000_V2PMAILBOX_ACK
) {
2464 core
->mac
[MBVFICR
] |= E1000_MBVFICR_VFACK_VF1
<< vfn
;
2465 mailbox_interrupt_to_pf(core
);
2468 /* Buffer Taken by VF (can be set only if the PFU is cleared). */
2469 if (val
& E1000_V2PMAILBOX_VFU
) {
2470 if (!(core
->mac
[index
] & E1000_V2PMAILBOX_PFU
)) {
2471 core
->mac
[index
] |= E1000_V2PMAILBOX_VFU
;
2472 core
->mac
[P2VMAILBOX0
+ vfn
] |= E1000_P2VMAILBOX_VFU
;
2475 core
->mac
[index
] &= ~E1000_V2PMAILBOX_VFU
;
2476 core
->mac
[P2VMAILBOX0
+ vfn
] &= ~E1000_P2VMAILBOX_VFU
;
2480 static void igb_vf_reset(IGBCore
*core
, uint16_t vfn
)
2483 uint16_t qn1
= vfn
+ IGB_NUM_VM_POOLS
;
2485 /* disable Rx and Tx for the VF*/
2486 core
->mac
[RXDCTL0
+ (qn0
* 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE
;
2487 core
->mac
[RXDCTL0
+ (qn1
* 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE
;
2488 core
->mac
[TXDCTL0
+ (qn0
* 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE
;
2489 core
->mac
[TXDCTL0
+ (qn1
* 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE
;
2490 core
->mac
[VFRE
] &= ~BIT(vfn
);
2491 core
->mac
[VFTE
] &= ~BIT(vfn
);
2492 /* indicate VF reset to PF */
2493 core
->mac
[VFLRE
] |= BIT(vfn
);
2494 /* VFLRE and mailbox use the same interrupt cause */
2495 mailbox_interrupt_to_pf(core
);
2498 static void igb_w1c(IGBCore
*core
, int index
, uint32_t val
)
2500 core
->mac
[index
] &= ~val
;
2503 static void igb_set_eimc(IGBCore
*core
, int index
, uint32_t val
)
2505 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2506 uint32_t mask
= msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
;
2508 trace_igb_irq_write_eimc(val
, msix
);
2510 /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
2511 igb_lower_interrupts(core
, EIMS
, val
& mask
);
2514 static void igb_set_eiac(IGBCore
*core
, int index
, uint32_t val
)
2516 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2519 trace_igb_irq_write_eiac(val
);
2522 * TODO: When using IOV, the bits that correspond to MSI-X vectors
2523 * that are assigned to a VF are read-only.
2525 core
->mac
[EIAC
] |= (val
& E1000_EICR_MSIX_MASK
);
2529 static void igb_set_eiam(IGBCore
*core
, int index
, uint32_t val
)
2531 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2534 * TODO: When using IOV, the bits that correspond to MSI-X vectors that
2535 * are assigned to a VF are read-only.
2538 ~(val
& (msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
));
2540 trace_igb_irq_write_eiam(val
, msix
);
2543 static void igb_set_eicr(IGBCore
*core
, int index
, uint32_t val
)
2545 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2548 * TODO: In IOV mode, only bit zero of this vector is available for the PF
2551 uint32_t mask
= msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
;
2553 trace_igb_irq_write_eicr(val
, msix
);
2554 igb_lower_interrupts(core
, EICR
, val
& mask
);
2557 static void igb_set_vtctrl(IGBCore
*core
, int index
, uint32_t val
)
2561 if (val
& E1000_CTRL_RST
) {
2562 vfn
= (index
- PVTCTRL0
) / 0x40;
2563 igb_vf_reset(core
, vfn
);
2567 static void igb_set_vteics(IGBCore
*core
, int index
, uint32_t val
)
2569 uint16_t vfn
= (index
- PVTEICS0
) / 0x40;
2571 core
->mac
[index
] = val
;
2572 igb_set_eics(core
, EICS
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2575 static void igb_set_vteims(IGBCore
*core
, int index
, uint32_t val
)
2577 uint16_t vfn
= (index
- PVTEIMS0
) / 0x40;
2579 core
->mac
[index
] = val
;
2580 igb_set_eims(core
, EIMS
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2583 static void igb_set_vteimc(IGBCore
*core
, int index
, uint32_t val
)
2585 uint16_t vfn
= (index
- PVTEIMC0
) / 0x40;
2587 core
->mac
[index
] = val
;
2588 igb_set_eimc(core
, EIMC
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2591 static void igb_set_vteiac(IGBCore
*core
, int index
, uint32_t val
)
2593 uint16_t vfn
= (index
- PVTEIAC0
) / 0x40;
2595 core
->mac
[index
] = val
;
2596 igb_set_eiac(core
, EIAC
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2599 static void igb_set_vteiam(IGBCore
*core
, int index
, uint32_t val
)
2601 uint16_t vfn
= (index
- PVTEIAM0
) / 0x40;
2603 core
->mac
[index
] = val
;
2604 igb_set_eiam(core
, EIAM
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2607 static void igb_set_vteicr(IGBCore
*core
, int index
, uint32_t val
)
2609 uint16_t vfn
= (index
- PVTEICR0
) / 0x40;
2611 core
->mac
[index
] = val
;
2612 igb_set_eicr(core
, EICR
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2615 static void igb_set_vtivar(IGBCore
*core
, int index
, uint32_t val
)
2617 uint16_t vfn
= (index
- VTIVAR
);
2622 core
->mac
[index
] = val
;
2624 /* Get assigned vector associated with queue Rx#0. */
2625 if ((val
& E1000_IVAR_VALID
)) {
2626 n
= igb_ivar_entry_rx(qn
);
2627 ent
= E1000_IVAR_VALID
| (24 - vfn
* IGBVF_MSIX_VEC_NUM
- (2 - (val
& 0x7)));
2628 core
->mac
[IVAR0
+ n
/ 4] |= ent
<< 8 * (n
% 4);
2631 /* Get assigned vector associated with queue Tx#0 */
2633 if ((ent
& E1000_IVAR_VALID
)) {
2634 n
= igb_ivar_entry_tx(qn
);
2635 ent
= E1000_IVAR_VALID
| (24 - vfn
* IGBVF_MSIX_VEC_NUM
- (2 - (ent
& 0x7)));
2636 core
->mac
[IVAR0
+ n
/ 4] |= ent
<< 8 * (n
% 4);
2640 * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
2645 igb_autoneg_timer(void *opaque
)
2647 IGBCore
*core
= opaque
;
2648 if (!qemu_get_queue(core
->owner_nic
)->link_down
) {
2649 e1000x_update_regs_on_autoneg_done(core
->mac
, core
->phy
);
2650 igb_start_recv(core
);
2652 igb_update_flowctl_status(core
);
2653 /* signal link status change to the guest */
2654 igb_raise_interrupts(core
, ICR
, E1000_ICR_LSC
);
2658 static inline uint16_t
2659 igb_get_reg_index_with_offset(const uint16_t *mac_reg_access
, hwaddr addr
)
2661 uint16_t index
= (addr
& 0x1ffff) >> 2;
2662 return index
+ (mac_reg_access
[index
] & 0xfffe);
2665 static const char igb_phy_regcap
[MAX_PHY_REG_ADDRESS
+ 1] = {
2666 [MII_BMCR
] = PHY_RW
,
2668 [MII_PHYID1
] = PHY_R
,
2669 [MII_PHYID2
] = PHY_R
,
2670 [MII_ANAR
] = PHY_RW
,
2671 [MII_ANLPAR
] = PHY_R
,
2673 [MII_ANNP
] = PHY_RW
,
2674 [MII_ANLPRNP
] = PHY_R
,
2675 [MII_CTRL1000
] = PHY_RW
,
2676 [MII_STAT1000
] = PHY_R
,
2677 [MII_EXTSTAT
] = PHY_R
,
2679 [IGP01E1000_PHY_PORT_CONFIG
] = PHY_RW
,
2680 [IGP01E1000_PHY_PORT_STATUS
] = PHY_R
,
2681 [IGP01E1000_PHY_PORT_CTRL
] = PHY_RW
,
2682 [IGP01E1000_PHY_LINK_HEALTH
] = PHY_R
,
2683 [IGP02E1000_PHY_POWER_MGMT
] = PHY_RW
,
2684 [IGP01E1000_PHY_PAGE_SELECT
] = PHY_W
2688 igb_phy_reg_write(IGBCore
*core
, uint32_t addr
, uint16_t data
)
2690 assert(addr
<= MAX_PHY_REG_ADDRESS
);
2692 if (addr
== MII_BMCR
) {
2693 igb_set_phy_ctrl(core
, data
);
2695 core
->phy
[addr
] = data
;
2700 igb_set_mdic(IGBCore
*core
, int index
, uint32_t val
)
2702 uint32_t data
= val
& E1000_MDIC_DATA_MASK
;
2703 uint32_t addr
= ((val
& E1000_MDIC_REG_MASK
) >> E1000_MDIC_REG_SHIFT
);
2705 if ((val
& E1000_MDIC_PHY_MASK
) >> E1000_MDIC_PHY_SHIFT
!= 1) { /* phy # */
2706 val
= core
->mac
[MDIC
] | E1000_MDIC_ERROR
;
2707 } else if (val
& E1000_MDIC_OP_READ
) {
2708 if (!(igb_phy_regcap
[addr
] & PHY_R
)) {
2709 trace_igb_core_mdic_read_unhandled(addr
);
2710 val
|= E1000_MDIC_ERROR
;
2712 val
= (val
^ data
) | core
->phy
[addr
];
2713 trace_igb_core_mdic_read(addr
, val
);
2715 } else if (val
& E1000_MDIC_OP_WRITE
) {
2716 if (!(igb_phy_regcap
[addr
] & PHY_W
)) {
2717 trace_igb_core_mdic_write_unhandled(addr
);
2718 val
|= E1000_MDIC_ERROR
;
2720 trace_igb_core_mdic_write(addr
, data
);
2721 igb_phy_reg_write(core
, addr
, data
);
2724 core
->mac
[MDIC
] = val
| E1000_MDIC_READY
;
2726 if (val
& E1000_MDIC_INT_EN
) {
2727 igb_raise_interrupts(core
, ICR
, E1000_ICR_MDAC
);
2732 igb_set_rdt(IGBCore
*core
, int index
, uint32_t val
)
2734 core
->mac
[index
] = val
& 0xffff;
2735 trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0
, index
), val
);
2736 igb_start_recv(core
);
2740 igb_set_status(IGBCore
*core
, int index
, uint32_t val
)
2742 if ((val
& E1000_STATUS_PHYRA
) == 0) {
2743 core
->mac
[index
] &= ~E1000_STATUS_PHYRA
;
2748 igb_set_ctrlext(IGBCore
*core
, int index
, uint32_t val
)
2750 trace_igb_link_set_ext_params(!!(val
& E1000_CTRL_EXT_ASDCHK
),
2751 !!(val
& E1000_CTRL_EXT_SPD_BYPS
),
2752 !!(val
& E1000_CTRL_EXT_PFRSTD
));
2754 /* Zero self-clearing bits */
2755 val
&= ~(E1000_CTRL_EXT_ASDCHK
| E1000_CTRL_EXT_EE_RST
);
2756 core
->mac
[CTRL_EXT
] = val
;
2758 if (core
->mac
[CTRL_EXT
] & E1000_CTRL_EXT_PFRSTD
) {
2759 for (int vfn
= 0; vfn
< IGB_MAX_VF_FUNCTIONS
; vfn
++) {
2760 core
->mac
[V2PMAILBOX0
+ vfn
] &= ~E1000_V2PMAILBOX_RSTI
;
2761 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_RSTD
;
2767 igb_set_pbaclr(IGBCore
*core
, int index
, uint32_t val
)
2771 core
->mac
[PBACLR
] = val
& E1000_PBACLR_VALID_MASK
;
2773 if (!msix_enabled(core
->owner
)) {
2777 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
2778 if (core
->mac
[PBACLR
] & BIT(i
)) {
2779 msix_clr_pending(core
->owner
, i
);
2785 igb_set_fcrth(IGBCore
*core
, int index
, uint32_t val
)
2787 core
->mac
[FCRTH
] = val
& 0xFFF8;
2791 igb_set_fcrtl(IGBCore
*core
, int index
, uint32_t val
)
2793 core
->mac
[FCRTL
] = val
& 0x8000FFF8;
2796 #define IGB_LOW_BITS_SET_FUNC(num) \
2798 igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
2800 core->mac[index] = val & (BIT(num) - 1); \
2803 IGB_LOW_BITS_SET_FUNC(4)
2804 IGB_LOW_BITS_SET_FUNC(13)
2805 IGB_LOW_BITS_SET_FUNC(16)
2808 igb_set_dlen(IGBCore
*core
, int index
, uint32_t val
)
2810 core
->mac
[index
] = val
& 0xffff0;
2814 igb_set_dbal(IGBCore
*core
, int index
, uint32_t val
)
2816 core
->mac
[index
] = val
& E1000_XDBAL_MASK
;
2820 igb_set_tdt(IGBCore
*core
, int index
, uint32_t val
)
2823 int qn
= igb_mq_queue_idx(TDT0
, index
);
2825 core
->mac
[index
] = val
& 0xffff;
2827 igb_tx_ring_init(core
, &txr
, qn
);
2828 igb_start_xmit(core
, &txr
);
2832 igb_set_ics(IGBCore
*core
, int index
, uint32_t val
)
2834 trace_e1000e_irq_write_ics(val
);
2835 igb_raise_interrupts(core
, ICR
, val
);
2839 igb_set_imc(IGBCore
*core
, int index
, uint32_t val
)
2841 trace_e1000e_irq_ims_clear_set_imc(val
);
2842 igb_lower_interrupts(core
, IMS
, val
);
2846 igb_set_ims(IGBCore
*core
, int index
, uint32_t val
)
2848 igb_raise_interrupts(core
, IMS
, val
& 0x77D4FBFD);
2851 static void igb_nsicr(IGBCore
*core
)
2854 * If GPIE.NSICR = 0, then the clear of IMS will occur only if at
2855 * least one bit is set in the IMS and there is a true interrupt as
2856 * reflected in ICR.INTA.
2858 if ((core
->mac
[GPIE
] & E1000_GPIE_NSICR
) ||
2859 (core
->mac
[IMS
] && (core
->mac
[ICR
] & E1000_ICR_INT_ASSERTED
))) {
2860 igb_lower_interrupts(core
, IMS
, core
->mac
[IAM
]);
2864 static void igb_set_icr(IGBCore
*core
, int index
, uint32_t val
)
2867 igb_lower_interrupts(core
, ICR
, val
);
2871 igb_mac_readreg(IGBCore
*core
, int index
)
2873 return core
->mac
[index
];
2877 igb_mac_ics_read(IGBCore
*core
, int index
)
2879 trace_e1000e_irq_read_ics(core
->mac
[ICS
]);
2880 return core
->mac
[ICS
];
2884 igb_mac_ims_read(IGBCore
*core
, int index
)
2886 trace_e1000e_irq_read_ims(core
->mac
[IMS
]);
2887 return core
->mac
[IMS
];
2891 igb_mac_swsm_read(IGBCore
*core
, int index
)
2893 uint32_t val
= core
->mac
[SWSM
];
2894 core
->mac
[SWSM
] = val
| E1000_SWSM_SMBI
;
2899 igb_mac_eitr_read(IGBCore
*core
, int index
)
2901 return core
->eitr_guest_value
[index
- EITR0
];
2904 static uint32_t igb_mac_vfmailbox_read(IGBCore
*core
, int index
)
2906 uint32_t val
= core
->mac
[index
];
2908 core
->mac
[index
] &= ~(E1000_V2PMAILBOX_PFSTS
| E1000_V2PMAILBOX_PFACK
|
2909 E1000_V2PMAILBOX_RSTD
);
2915 igb_mac_icr_read(IGBCore
*core
, int index
)
2917 uint32_t ret
= core
->mac
[ICR
];
2919 if (core
->mac
[GPIE
] & E1000_GPIE_NSICR
) {
2920 trace_igb_irq_icr_clear_gpie_nsicr();
2921 igb_lower_interrupts(core
, ICR
, 0xffffffff);
2922 } else if (core
->mac
[IMS
] == 0) {
2923 trace_e1000e_irq_icr_clear_zero_ims();
2924 igb_lower_interrupts(core
, ICR
, 0xffffffff);
2925 } else if (core
->mac
[ICR
] & E1000_ICR_INT_ASSERTED
) {
2926 igb_lower_interrupts(core
, ICR
, 0xffffffff);
2927 } else if (!msix_enabled(core
->owner
)) {
2928 trace_e1000e_irq_icr_clear_nonmsix_icr_read();
2929 igb_lower_interrupts(core
, ICR
, 0xffffffff);
2937 igb_mac_read_clr4(IGBCore
*core
, int index
)
2939 uint32_t ret
= core
->mac
[index
];
2941 core
->mac
[index
] = 0;
2946 igb_mac_read_clr8(IGBCore
*core
, int index
)
2948 uint32_t ret
= core
->mac
[index
];
2950 core
->mac
[index
] = 0;
2951 core
->mac
[index
- 1] = 0;
2956 igb_get_ctrl(IGBCore
*core
, int index
)
2958 uint32_t val
= core
->mac
[CTRL
];
2960 trace_e1000e_link_read_params(
2961 !!(val
& E1000_CTRL_ASDE
),
2962 (val
& E1000_CTRL_SPD_SEL
) >> E1000_CTRL_SPD_SHIFT
,
2963 !!(val
& E1000_CTRL_FRCSPD
),
2964 !!(val
& E1000_CTRL_FRCDPX
),
2965 !!(val
& E1000_CTRL_RFCE
),
2966 !!(val
& E1000_CTRL_TFCE
));
2971 static uint32_t igb_get_status(IGBCore
*core
, int index
)
2973 uint32_t res
= core
->mac
[STATUS
];
2974 uint16_t num_vfs
= pcie_sriov_num_vfs(core
->owner
);
2976 if (core
->mac
[CTRL
] & E1000_CTRL_FRCDPX
) {
2977 res
|= (core
->mac
[CTRL
] & E1000_CTRL_FD
) ? E1000_STATUS_FD
: 0;
2979 res
|= E1000_STATUS_FD
;
2982 if ((core
->mac
[CTRL
] & E1000_CTRL_FRCSPD
) ||
2983 (core
->mac
[CTRL_EXT
] & E1000_CTRL_EXT_SPD_BYPS
)) {
2984 switch (core
->mac
[CTRL
] & E1000_CTRL_SPD_SEL
) {
2985 case E1000_CTRL_SPD_10
:
2986 res
|= E1000_STATUS_SPEED_10
;
2988 case E1000_CTRL_SPD_100
:
2989 res
|= E1000_STATUS_SPEED_100
;
2991 case E1000_CTRL_SPD_1000
:
2993 res
|= E1000_STATUS_SPEED_1000
;
2997 res
|= E1000_STATUS_SPEED_1000
;
3001 res
|= num_vfs
<< E1000_STATUS_NUM_VFS_SHIFT
;
3002 res
|= E1000_STATUS_IOV_MODE
;
3005 if (!(core
->mac
[CTRL
] & E1000_CTRL_GIO_MASTER_DISABLE
)) {
3006 res
|= E1000_STATUS_GIO_MASTER_ENABLE
;
3013 igb_mac_writereg(IGBCore
*core
, int index
, uint32_t val
)
3015 core
->mac
[index
] = val
;
3019 igb_mac_setmacaddr(IGBCore
*core
, int index
, uint32_t val
)
3021 uint32_t macaddr
[2];
3023 core
->mac
[index
] = val
;
3025 macaddr
[0] = cpu_to_le32(core
->mac
[RA
]);
3026 macaddr
[1] = cpu_to_le32(core
->mac
[RA
+ 1]);
3027 qemu_format_nic_info_str(qemu_get_queue(core
->owner_nic
),
3028 (uint8_t *) macaddr
);
3030 trace_e1000e_mac_set_sw(MAC_ARG(macaddr
));
3034 igb_set_eecd(IGBCore
*core
, int index
, uint32_t val
)
3036 static const uint32_t ro_bits
= E1000_EECD_PRES
|
3037 E1000_EECD_AUTO_RD
|
3038 E1000_EECD_SIZE_EX_MASK
;
3040 core
->mac
[EECD
] = (core
->mac
[EECD
] & ro_bits
) | (val
& ~ro_bits
);
3044 igb_set_eerd(IGBCore
*core
, int index
, uint32_t val
)
3046 uint32_t addr
= (val
>> E1000_EERW_ADDR_SHIFT
) & E1000_EERW_ADDR_MASK
;
3050 if ((addr
< IGB_EEPROM_SIZE
) && (val
& E1000_EERW_START
)) {
3051 data
= core
->eeprom
[addr
];
3052 flags
= E1000_EERW_DONE
;
3055 core
->mac
[EERD
] = flags
|
3056 (addr
<< E1000_EERW_ADDR_SHIFT
) |
3057 (data
<< E1000_EERW_DATA_SHIFT
);
3061 igb_set_eitr(IGBCore
*core
, int index
, uint32_t val
)
3063 uint32_t eitr_num
= index
- EITR0
;
3065 trace_igb_irq_eitr_set(eitr_num
, val
);
3067 core
->eitr_guest_value
[eitr_num
] = val
& ~E1000_EITR_CNT_IGNR
;
3068 core
->mac
[index
] = val
& 0x7FFE;
3072 igb_update_rx_offloads(IGBCore
*core
)
3074 int cso_state
= igb_rx_l4_cso_enabled(core
);
3076 trace_e1000e_rx_set_cso(cso_state
);
3078 if (core
->has_vnet
) {
3079 qemu_set_offload(qemu_get_queue(core
->owner_nic
)->peer
,
3080 cso_state
, 0, 0, 0, 0, 0, 0);
3085 igb_set_rxcsum(IGBCore
*core
, int index
, uint32_t val
)
3087 core
->mac
[RXCSUM
] = val
;
3088 igb_update_rx_offloads(core
);
3092 igb_set_gcr(IGBCore
*core
, int index
, uint32_t val
)
3094 uint32_t ro_bits
= core
->mac
[GCR
] & E1000_GCR_RO_BITS
;
3095 core
->mac
[GCR
] = (val
& ~E1000_GCR_RO_BITS
) | ro_bits
;
3098 static uint32_t igb_get_systiml(IGBCore
*core
, int index
)
3100 e1000x_timestamp(core
->mac
, core
->timadj
, SYSTIML
, SYSTIMH
);
3101 return core
->mac
[SYSTIML
];
3104 static uint32_t igb_get_rxsatrh(IGBCore
*core
, int index
)
3106 core
->mac
[TSYNCRXCTL
] &= ~E1000_TSYNCRXCTL_VALID
;
3107 return core
->mac
[RXSATRH
];
3110 static uint32_t igb_get_txstmph(IGBCore
*core
, int index
)
3112 core
->mac
[TSYNCTXCTL
] &= ~E1000_TSYNCTXCTL_VALID
;
3113 return core
->mac
[TXSTMPH
];
3116 static void igb_set_timinca(IGBCore
*core
, int index
, uint32_t val
)
3118 e1000x_set_timinca(core
->mac
, &core
->timadj
, val
);
3121 static void igb_set_timadjh(IGBCore
*core
, int index
, uint32_t val
)
3123 core
->mac
[TIMADJH
] = val
;
3124 core
->timadj
+= core
->mac
[TIMADJL
] | ((int64_t)core
->mac
[TIMADJH
] << 32);
3127 #define igb_getreg(x) [x] = igb_mac_readreg
3128 typedef uint32_t (*readops
)(IGBCore
*, int);
3129 static const readops igb_macreg_readops
[] = {
3159 igb_getreg(RDBAH10
),
3160 igb_getreg(RDBAH11
),
3161 igb_getreg(RDBAH12
),
3162 igb_getreg(RDBAH13
),
3163 igb_getreg(RDBAH14
),
3164 igb_getreg(RDBAH15
),
3175 igb_getreg(TDBAL10
),
3176 igb_getreg(TDBAL11
),
3177 igb_getreg(TDBAL12
),
3178 igb_getreg(TDBAL13
),
3179 igb_getreg(TDBAL14
),
3180 igb_getreg(TDBAL15
),
3191 igb_getreg(RDLEN10
),
3192 igb_getreg(RDLEN11
),
3193 igb_getreg(RDLEN12
),
3194 igb_getreg(RDLEN13
),
3195 igb_getreg(RDLEN14
),
3196 igb_getreg(RDLEN15
),
3197 igb_getreg(SRRCTL0
),
3198 igb_getreg(SRRCTL1
),
3199 igb_getreg(SRRCTL2
),
3200 igb_getreg(SRRCTL3
),
3201 igb_getreg(SRRCTL4
),
3202 igb_getreg(SRRCTL5
),
3203 igb_getreg(SRRCTL6
),
3204 igb_getreg(SRRCTL7
),
3205 igb_getreg(SRRCTL8
),
3206 igb_getreg(SRRCTL9
),
3207 igb_getreg(SRRCTL10
),
3208 igb_getreg(SRRCTL11
),
3209 igb_getreg(SRRCTL12
),
3210 igb_getreg(SRRCTL13
),
3211 igb_getreg(SRRCTL14
),
3212 igb_getreg(SRRCTL15
),
3213 igb_getreg(LATECOL
),
3237 igb_getreg(RXSTMPH
),
3238 igb_getreg(TXSTMPL
),
3239 igb_getreg(TIMADJL
),
3277 igb_getreg(FLMNGCTL
),
3278 igb_getreg(FLMNGCNT
),
3279 igb_getreg(TSYNCTXCTL
),
3280 igb_getreg(EEMNGDATA
),
3281 igb_getreg(CTRL_EXT
),
3282 igb_getreg(SYSTIMH
),
3283 igb_getreg(EEMNGCTL
),
3284 igb_getreg(FLMNGDATA
),
3285 igb_getreg(TSYNCRXCTL
),
3288 igb_getreg(TCTL_EXT
),
3310 igb_getreg(XOFFTXC
),
3314 igb_getreg(TIMINCA
),
3320 igb_getreg(RXSATRL
),
3332 igb_getreg(TDLEN10
),
3333 igb_getreg(TDLEN11
),
3334 igb_getreg(TDLEN12
),
3335 igb_getreg(TDLEN13
),
3336 igb_getreg(TDLEN14
),
3337 igb_getreg(TDLEN15
),
3342 igb_getreg(TXDCTL0
),
3343 igb_getreg(TXDCTL1
),
3344 igb_getreg(TXDCTL2
),
3345 igb_getreg(TXDCTL3
),
3346 igb_getreg(TXDCTL4
),
3347 igb_getreg(TXDCTL5
),
3348 igb_getreg(TXDCTL6
),
3349 igb_getreg(TXDCTL7
),
3350 igb_getreg(TXDCTL8
),
3351 igb_getreg(TXDCTL9
),
3352 igb_getreg(TXDCTL10
),
3353 igb_getreg(TXDCTL11
),
3354 igb_getreg(TXDCTL12
),
3355 igb_getreg(TXDCTL13
),
3356 igb_getreg(TXDCTL14
),
3357 igb_getreg(TXDCTL15
),
3368 igb_getreg(TXCTL10
),
3369 igb_getreg(TXCTL11
),
3370 igb_getreg(TXCTL12
),
3371 igb_getreg(TXCTL13
),
3372 igb_getreg(TXCTL14
),
3373 igb_getreg(TXCTL15
),
3374 igb_getreg(TDWBAL0
),
3375 igb_getreg(TDWBAL1
),
3376 igb_getreg(TDWBAL2
),
3377 igb_getreg(TDWBAL3
),
3378 igb_getreg(TDWBAL4
),
3379 igb_getreg(TDWBAL5
),
3380 igb_getreg(TDWBAL6
),
3381 igb_getreg(TDWBAL7
),
3382 igb_getreg(TDWBAL8
),
3383 igb_getreg(TDWBAL9
),
3384 igb_getreg(TDWBAL10
),
3385 igb_getreg(TDWBAL11
),
3386 igb_getreg(TDWBAL12
),
3387 igb_getreg(TDWBAL13
),
3388 igb_getreg(TDWBAL14
),
3389 igb_getreg(TDWBAL15
),
3390 igb_getreg(TDWBAH0
),
3391 igb_getreg(TDWBAH1
),
3392 igb_getreg(TDWBAH2
),
3393 igb_getreg(TDWBAH3
),
3394 igb_getreg(TDWBAH4
),
3395 igb_getreg(TDWBAH5
),
3396 igb_getreg(TDWBAH6
),
3397 igb_getreg(TDWBAH7
),
3398 igb_getreg(TDWBAH8
),
3399 igb_getreg(TDWBAH9
),
3400 igb_getreg(TDWBAH10
),
3401 igb_getreg(TDWBAH11
),
3402 igb_getreg(TDWBAH12
),
3403 igb_getreg(TDWBAH13
),
3404 igb_getreg(TDWBAH14
),
3405 igb_getreg(TDWBAH15
),
3406 igb_getreg(PVTCTRL0
),
3407 igb_getreg(PVTCTRL1
),
3408 igb_getreg(PVTCTRL2
),
3409 igb_getreg(PVTCTRL3
),
3410 igb_getreg(PVTCTRL4
),
3411 igb_getreg(PVTCTRL5
),
3412 igb_getreg(PVTCTRL6
),
3413 igb_getreg(PVTCTRL7
),
3414 igb_getreg(PVTEIMS0
),
3415 igb_getreg(PVTEIMS1
),
3416 igb_getreg(PVTEIMS2
),
3417 igb_getreg(PVTEIMS3
),
3418 igb_getreg(PVTEIMS4
),
3419 igb_getreg(PVTEIMS5
),
3420 igb_getreg(PVTEIMS6
),
3421 igb_getreg(PVTEIMS7
),
3422 igb_getreg(PVTEIAC0
),
3423 igb_getreg(PVTEIAC1
),
3424 igb_getreg(PVTEIAC2
),
3425 igb_getreg(PVTEIAC3
),
3426 igb_getreg(PVTEIAC4
),
3427 igb_getreg(PVTEIAC5
),
3428 igb_getreg(PVTEIAC6
),
3429 igb_getreg(PVTEIAC7
),
3430 igb_getreg(PVTEIAM0
),
3431 igb_getreg(PVTEIAM1
),
3432 igb_getreg(PVTEIAM2
),
3433 igb_getreg(PVTEIAM3
),
3434 igb_getreg(PVTEIAM4
),
3435 igb_getreg(PVTEIAM5
),
3436 igb_getreg(PVTEIAM6
),
3437 igb_getreg(PVTEIAM7
),
3438 igb_getreg(PVFGPRC0
),
3439 igb_getreg(PVFGPRC1
),
3440 igb_getreg(PVFGPRC2
),
3441 igb_getreg(PVFGPRC3
),
3442 igb_getreg(PVFGPRC4
),
3443 igb_getreg(PVFGPRC5
),
3444 igb_getreg(PVFGPRC6
),
3445 igb_getreg(PVFGPRC7
),
3446 igb_getreg(PVFGPTC0
),
3447 igb_getreg(PVFGPTC1
),
3448 igb_getreg(PVFGPTC2
),
3449 igb_getreg(PVFGPTC3
),
3450 igb_getreg(PVFGPTC4
),
3451 igb_getreg(PVFGPTC5
),
3452 igb_getreg(PVFGPTC6
),
3453 igb_getreg(PVFGPTC7
),
3454 igb_getreg(PVFGORC0
),
3455 igb_getreg(PVFGORC1
),
3456 igb_getreg(PVFGORC2
),
3457 igb_getreg(PVFGORC3
),
3458 igb_getreg(PVFGORC4
),
3459 igb_getreg(PVFGORC5
),
3460 igb_getreg(PVFGORC6
),
3461 igb_getreg(PVFGORC7
),
3462 igb_getreg(PVFGOTC0
),
3463 igb_getreg(PVFGOTC1
),
3464 igb_getreg(PVFGOTC2
),
3465 igb_getreg(PVFGOTC3
),
3466 igb_getreg(PVFGOTC4
),
3467 igb_getreg(PVFGOTC5
),
3468 igb_getreg(PVFGOTC6
),
3469 igb_getreg(PVFGOTC7
),
3470 igb_getreg(PVFMPRC0
),
3471 igb_getreg(PVFMPRC1
),
3472 igb_getreg(PVFMPRC2
),
3473 igb_getreg(PVFMPRC3
),
3474 igb_getreg(PVFMPRC4
),
3475 igb_getreg(PVFMPRC5
),
3476 igb_getreg(PVFMPRC6
),
3477 igb_getreg(PVFMPRC7
),
3478 igb_getreg(PVFGPRLBC0
),
3479 igb_getreg(PVFGPRLBC1
),
3480 igb_getreg(PVFGPRLBC2
),
3481 igb_getreg(PVFGPRLBC3
),
3482 igb_getreg(PVFGPRLBC4
),
3483 igb_getreg(PVFGPRLBC5
),
3484 igb_getreg(PVFGPRLBC6
),
3485 igb_getreg(PVFGPRLBC7
),
3486 igb_getreg(PVFGPTLBC0
),
3487 igb_getreg(PVFGPTLBC1
),
3488 igb_getreg(PVFGPTLBC2
),
3489 igb_getreg(PVFGPTLBC3
),
3490 igb_getreg(PVFGPTLBC4
),
3491 igb_getreg(PVFGPTLBC5
),
3492 igb_getreg(PVFGPTLBC6
),
3493 igb_getreg(PVFGPTLBC7
),
3494 igb_getreg(PVFGORLBC0
),
3495 igb_getreg(PVFGORLBC1
),
3496 igb_getreg(PVFGORLBC2
),
3497 igb_getreg(PVFGORLBC3
),
3498 igb_getreg(PVFGORLBC4
),
3499 igb_getreg(PVFGORLBC5
),
3500 igb_getreg(PVFGORLBC6
),
3501 igb_getreg(PVFGORLBC7
),
3502 igb_getreg(PVFGOTLBC0
),
3503 igb_getreg(PVFGOTLBC1
),
3504 igb_getreg(PVFGOTLBC2
),
3505 igb_getreg(PVFGOTLBC3
),
3506 igb_getreg(PVFGOTLBC4
),
3507 igb_getreg(PVFGOTLBC5
),
3508 igb_getreg(PVFGOTLBC6
),
3509 igb_getreg(PVFGOTLBC7
),
3524 igb_getreg(RDBAL10
),
3525 igb_getreg(RDBAL11
),
3526 igb_getreg(RDBAL12
),
3527 igb_getreg(RDBAL13
),
3528 igb_getreg(RDBAL14
),
3529 igb_getreg(RDBAL15
),
3540 igb_getreg(TDBAH10
),
3541 igb_getreg(TDBAH11
),
3542 igb_getreg(TDBAH12
),
3543 igb_getreg(TDBAH13
),
3544 igb_getreg(TDBAH14
),
3545 igb_getreg(TDBAH15
),
3548 igb_getreg(XOFFRXC
),
3554 igb_getreg(FUNCTAG
),
3560 igb_getreg(RXDCTL0
),
3561 igb_getreg(RXDCTL1
),
3562 igb_getreg(RXDCTL2
),
3563 igb_getreg(RXDCTL3
),
3564 igb_getreg(RXDCTL4
),
3565 igb_getreg(RXDCTL5
),
3566 igb_getreg(RXDCTL6
),
3567 igb_getreg(RXDCTL7
),
3568 igb_getreg(RXDCTL8
),
3569 igb_getreg(RXDCTL9
),
3570 igb_getreg(RXDCTL10
),
3571 igb_getreg(RXDCTL11
),
3572 igb_getreg(RXDCTL12
),
3573 igb_getreg(RXDCTL13
),
3574 igb_getreg(RXDCTL14
),
3575 igb_getreg(RXDCTL15
),
3576 igb_getreg(RXSTMPL
),
3577 igb_getreg(TIMADJH
),
3587 [TOTH
] = igb_mac_read_clr8
,
3588 [GOTCH
] = igb_mac_read_clr8
,
3589 [PRC64
] = igb_mac_read_clr4
,
3590 [PRC255
] = igb_mac_read_clr4
,
3591 [PRC1023
] = igb_mac_read_clr4
,
3592 [PTC64
] = igb_mac_read_clr4
,
3593 [PTC255
] = igb_mac_read_clr4
,
3594 [PTC1023
] = igb_mac_read_clr4
,
3595 [GPRC
] = igb_mac_read_clr4
,
3596 [TPT
] = igb_mac_read_clr4
,
3597 [RUC
] = igb_mac_read_clr4
,
3598 [BPRC
] = igb_mac_read_clr4
,
3599 [MPTC
] = igb_mac_read_clr4
,
3600 [IAC
] = igb_mac_read_clr4
,
3601 [ICR
] = igb_mac_icr_read
,
3602 [STATUS
] = igb_get_status
,
3603 [ICS
] = igb_mac_ics_read
,
3605 * 8.8.10: Reading the IMC register returns the value of the IMS register.
3607 [IMC
] = igb_mac_ims_read
,
3608 [TORH
] = igb_mac_read_clr8
,
3609 [GORCH
] = igb_mac_read_clr8
,
3610 [PRC127
] = igb_mac_read_clr4
,
3611 [PRC511
] = igb_mac_read_clr4
,
3612 [PRC1522
] = igb_mac_read_clr4
,
3613 [PTC127
] = igb_mac_read_clr4
,
3614 [PTC511
] = igb_mac_read_clr4
,
3615 [PTC1522
] = igb_mac_read_clr4
,
3616 [GPTC
] = igb_mac_read_clr4
,
3617 [TPR
] = igb_mac_read_clr4
,
3618 [ROC
] = igb_mac_read_clr4
,
3619 [MPRC
] = igb_mac_read_clr4
,
3620 [BPTC
] = igb_mac_read_clr4
,
3621 [TSCTC
] = igb_mac_read_clr4
,
3622 [CTRL
] = igb_get_ctrl
,
3623 [SWSM
] = igb_mac_swsm_read
,
3624 [IMS
] = igb_mac_ims_read
,
3625 [SYSTIML
] = igb_get_systiml
,
3626 [RXSATRH
] = igb_get_rxsatrh
,
3627 [TXSTMPH
] = igb_get_txstmph
,
3629 [CRCERRS
... MPC
] = igb_mac_readreg
,
3630 [IP6AT
... IP6AT
+ 3] = igb_mac_readreg
,
3631 [IP4AT
... IP4AT
+ 6] = igb_mac_readreg
,
3632 [RA
... RA
+ 31] = igb_mac_readreg
,
3633 [RA2
... RA2
+ 31] = igb_mac_readreg
,
3634 [WUPM
... WUPM
+ 31] = igb_mac_readreg
,
3635 [MTA
... MTA
+ E1000_MC_TBL_SIZE
- 1] = igb_mac_readreg
,
3636 [VFTA
... VFTA
+ E1000_VLAN_FILTER_TBL_SIZE
- 1] = igb_mac_readreg
,
3637 [FFMT
... FFMT
+ 254] = igb_mac_readreg
,
3638 [MDEF
... MDEF
+ 7] = igb_mac_readreg
,
3639 [FTFT
... FTFT
+ 254] = igb_mac_readreg
,
3640 [RETA
... RETA
+ 31] = igb_mac_readreg
,
3641 [RSSRK
... RSSRK
+ 9] = igb_mac_readreg
,
3642 [MAVTV0
... MAVTV3
] = igb_mac_readreg
,
3643 [EITR0
... EITR0
+ IGB_INTR_NUM
- 1] = igb_mac_eitr_read
,
3644 [PVTEICR0
] = igb_mac_read_clr4
,
3645 [PVTEICR1
] = igb_mac_read_clr4
,
3646 [PVTEICR2
] = igb_mac_read_clr4
,
3647 [PVTEICR3
] = igb_mac_read_clr4
,
3648 [PVTEICR4
] = igb_mac_read_clr4
,
3649 [PVTEICR5
] = igb_mac_read_clr4
,
3650 [PVTEICR6
] = igb_mac_read_clr4
,
3651 [PVTEICR7
] = igb_mac_read_clr4
,
3654 [FWSM
] = igb_mac_readreg
,
3655 [SW_FW_SYNC
] = igb_mac_readreg
,
3656 [HTCBDPC
] = igb_mac_read_clr4
,
3657 [EICR
] = igb_mac_read_clr4
,
3658 [EIMS
] = igb_mac_readreg
,
3659 [EIAM
] = igb_mac_readreg
,
3660 [IVAR0
... IVAR0
+ 7] = igb_mac_readreg
,
3661 igb_getreg(IVAR_MISC
),
3662 igb_getreg(TSYNCRXCFG
),
3663 [ETQF0
... ETQF0
+ 7] = igb_mac_readreg
,
3665 [P2VMAILBOX0
... P2VMAILBOX7
] = igb_mac_readreg
,
3666 [V2PMAILBOX0
... V2PMAILBOX7
] = igb_mac_vfmailbox_read
,
3667 igb_getreg(MBVFICR
),
3668 [VMBMEM0
... VMBMEM0
+ 127] = igb_mac_readreg
,
3669 igb_getreg(MBVFIMR
),
3676 [VLVF0
... VLVF0
+ E1000_VLVF_ARRAY_SIZE
- 1] = igb_mac_readreg
,
3677 [VMVIR0
... VMVIR7
] = igb_mac_readreg
,
3678 [VMOLR0
... VMOLR7
] = igb_mac_readreg
,
3679 [WVBR
] = igb_mac_read_clr4
,
3680 [RQDPC0
] = igb_mac_read_clr4
,
3681 [RQDPC1
] = igb_mac_read_clr4
,
3682 [RQDPC2
] = igb_mac_read_clr4
,
3683 [RQDPC3
] = igb_mac_read_clr4
,
3684 [RQDPC4
] = igb_mac_read_clr4
,
3685 [RQDPC5
] = igb_mac_read_clr4
,
3686 [RQDPC6
] = igb_mac_read_clr4
,
3687 [RQDPC7
] = igb_mac_read_clr4
,
3688 [RQDPC8
] = igb_mac_read_clr4
,
3689 [RQDPC9
] = igb_mac_read_clr4
,
3690 [RQDPC10
] = igb_mac_read_clr4
,
3691 [RQDPC11
] = igb_mac_read_clr4
,
3692 [RQDPC12
] = igb_mac_read_clr4
,
3693 [RQDPC13
] = igb_mac_read_clr4
,
3694 [RQDPC14
] = igb_mac_read_clr4
,
3695 [RQDPC15
] = igb_mac_read_clr4
,
3696 [VTIVAR
... VTIVAR
+ 7] = igb_mac_readreg
,
3697 [VTIVAR_MISC
... VTIVAR_MISC
+ 7] = igb_mac_readreg
,
3699 enum { IGB_NREADOPS
= ARRAY_SIZE(igb_macreg_readops
) };
3701 #define igb_putreg(x) [x] = igb_mac_writereg
3702 typedef void (*writeops
)(IGBCore
*, int, uint32_t);
3703 static const writeops igb_macreg_writeops
[] = {
3716 igb_putreg(RDBAH10
),
3717 igb_putreg(RDBAH11
),
3718 igb_putreg(RDBAH12
),
3719 igb_putreg(RDBAH13
),
3720 igb_putreg(RDBAH14
),
3721 igb_putreg(RDBAH15
),
3722 igb_putreg(SRRCTL0
),
3723 igb_putreg(SRRCTL1
),
3724 igb_putreg(SRRCTL2
),
3725 igb_putreg(SRRCTL3
),
3726 igb_putreg(SRRCTL4
),
3727 igb_putreg(SRRCTL5
),
3728 igb_putreg(SRRCTL6
),
3729 igb_putreg(SRRCTL7
),
3730 igb_putreg(SRRCTL8
),
3731 igb_putreg(SRRCTL9
),
3732 igb_putreg(SRRCTL10
),
3733 igb_putreg(SRRCTL11
),
3734 igb_putreg(SRRCTL12
),
3735 igb_putreg(SRRCTL13
),
3736 igb_putreg(SRRCTL14
),
3737 igb_putreg(SRRCTL15
),
3738 igb_putreg(RXDCTL0
),
3739 igb_putreg(RXDCTL1
),
3740 igb_putreg(RXDCTL2
),
3741 igb_putreg(RXDCTL3
),
3742 igb_putreg(RXDCTL4
),
3743 igb_putreg(RXDCTL5
),
3744 igb_putreg(RXDCTL6
),
3745 igb_putreg(RXDCTL7
),
3746 igb_putreg(RXDCTL8
),
3747 igb_putreg(RXDCTL9
),
3748 igb_putreg(RXDCTL10
),
3749 igb_putreg(RXDCTL11
),
3750 igb_putreg(RXDCTL12
),
3751 igb_putreg(RXDCTL13
),
3752 igb_putreg(RXDCTL14
),
3753 igb_putreg(RXDCTL15
),
3756 igb_putreg(TCTL_EXT
),
3775 igb_putreg(TDBAH10
),
3776 igb_putreg(TDBAH11
),
3777 igb_putreg(TDBAH12
),
3778 igb_putreg(TDBAH13
),
3779 igb_putreg(TDBAH14
),
3780 igb_putreg(TDBAH15
),
3786 igb_putreg(FUNCTAG
),
3798 igb_putreg(TXDCTL0
),
3799 igb_putreg(TXDCTL1
),
3800 igb_putreg(TXDCTL2
),
3801 igb_putreg(TXDCTL3
),
3802 igb_putreg(TXDCTL4
),
3803 igb_putreg(TXDCTL5
),
3804 igb_putreg(TXDCTL6
),
3805 igb_putreg(TXDCTL7
),
3806 igb_putreg(TXDCTL8
),
3807 igb_putreg(TXDCTL9
),
3808 igb_putreg(TXDCTL10
),
3809 igb_putreg(TXDCTL11
),
3810 igb_putreg(TXDCTL12
),
3811 igb_putreg(TXDCTL13
),
3812 igb_putreg(TXDCTL14
),
3813 igb_putreg(TXDCTL15
),
3824 igb_putreg(TXCTL10
),
3825 igb_putreg(TXCTL11
),
3826 igb_putreg(TXCTL12
),
3827 igb_putreg(TXCTL13
),
3828 igb_putreg(TXCTL14
),
3829 igb_putreg(TXCTL15
),
3830 igb_putreg(TDWBAL0
),
3831 igb_putreg(TDWBAL1
),
3832 igb_putreg(TDWBAL2
),
3833 igb_putreg(TDWBAL3
),
3834 igb_putreg(TDWBAL4
),
3835 igb_putreg(TDWBAL5
),
3836 igb_putreg(TDWBAL6
),
3837 igb_putreg(TDWBAL7
),
3838 igb_putreg(TDWBAL8
),
3839 igb_putreg(TDWBAL9
),
3840 igb_putreg(TDWBAL10
),
3841 igb_putreg(TDWBAL11
),
3842 igb_putreg(TDWBAL12
),
3843 igb_putreg(TDWBAL13
),
3844 igb_putreg(TDWBAL14
),
3845 igb_putreg(TDWBAL15
),
3846 igb_putreg(TDWBAH0
),
3847 igb_putreg(TDWBAH1
),
3848 igb_putreg(TDWBAH2
),
3849 igb_putreg(TDWBAH3
),
3850 igb_putreg(TDWBAH4
),
3851 igb_putreg(TDWBAH5
),
3852 igb_putreg(TDWBAH6
),
3853 igb_putreg(TDWBAH7
),
3854 igb_putreg(TDWBAH8
),
3855 igb_putreg(TDWBAH9
),
3856 igb_putreg(TDWBAH10
),
3857 igb_putreg(TDWBAH11
),
3858 igb_putreg(TDWBAH12
),
3859 igb_putreg(TDWBAH13
),
3860 igb_putreg(TDWBAH14
),
3861 igb_putreg(TDWBAH15
),
3863 igb_putreg(RXSTMPH
),
3864 igb_putreg(RXSTMPL
),
3865 igb_putreg(RXSATRL
),
3866 igb_putreg(RXSATRH
),
3867 igb_putreg(TXSTMPL
),
3868 igb_putreg(TXSTMPH
),
3869 igb_putreg(SYSTIML
),
3870 igb_putreg(SYSTIMH
),
3871 igb_putreg(TIMADJL
),
3872 igb_putreg(TSYNCRXCTL
),
3873 igb_putreg(TSYNCTXCTL
),
3874 igb_putreg(EEMNGCTL
),
3880 [TDH0
] = igb_set_16bit
,
3881 [TDH1
] = igb_set_16bit
,
3882 [TDH2
] = igb_set_16bit
,
3883 [TDH3
] = igb_set_16bit
,
3884 [TDH4
] = igb_set_16bit
,
3885 [TDH5
] = igb_set_16bit
,
3886 [TDH6
] = igb_set_16bit
,
3887 [TDH7
] = igb_set_16bit
,
3888 [TDH8
] = igb_set_16bit
,
3889 [TDH9
] = igb_set_16bit
,
3890 [TDH10
] = igb_set_16bit
,
3891 [TDH11
] = igb_set_16bit
,
3892 [TDH12
] = igb_set_16bit
,
3893 [TDH13
] = igb_set_16bit
,
3894 [TDH14
] = igb_set_16bit
,
3895 [TDH15
] = igb_set_16bit
,
3896 [TDT0
] = igb_set_tdt
,
3897 [TDT1
] = igb_set_tdt
,
3898 [TDT2
] = igb_set_tdt
,
3899 [TDT3
] = igb_set_tdt
,
3900 [TDT4
] = igb_set_tdt
,
3901 [TDT5
] = igb_set_tdt
,
3902 [TDT6
] = igb_set_tdt
,
3903 [TDT7
] = igb_set_tdt
,
3904 [TDT8
] = igb_set_tdt
,
3905 [TDT9
] = igb_set_tdt
,
3906 [TDT10
] = igb_set_tdt
,
3907 [TDT11
] = igb_set_tdt
,
3908 [TDT12
] = igb_set_tdt
,
3909 [TDT13
] = igb_set_tdt
,
3910 [TDT14
] = igb_set_tdt
,
3911 [TDT15
] = igb_set_tdt
,
3912 [MDIC
] = igb_set_mdic
,
3913 [ICS
] = igb_set_ics
,
3914 [RDH0
] = igb_set_16bit
,
3915 [RDH1
] = igb_set_16bit
,
3916 [RDH2
] = igb_set_16bit
,
3917 [RDH3
] = igb_set_16bit
,
3918 [RDH4
] = igb_set_16bit
,
3919 [RDH5
] = igb_set_16bit
,
3920 [RDH6
] = igb_set_16bit
,
3921 [RDH7
] = igb_set_16bit
,
3922 [RDH8
] = igb_set_16bit
,
3923 [RDH9
] = igb_set_16bit
,
3924 [RDH10
] = igb_set_16bit
,
3925 [RDH11
] = igb_set_16bit
,
3926 [RDH12
] = igb_set_16bit
,
3927 [RDH13
] = igb_set_16bit
,
3928 [RDH14
] = igb_set_16bit
,
3929 [RDH15
] = igb_set_16bit
,
3930 [RDT0
] = igb_set_rdt
,
3931 [RDT1
] = igb_set_rdt
,
3932 [RDT2
] = igb_set_rdt
,
3933 [RDT3
] = igb_set_rdt
,
3934 [RDT4
] = igb_set_rdt
,
3935 [RDT5
] = igb_set_rdt
,
3936 [RDT6
] = igb_set_rdt
,
3937 [RDT7
] = igb_set_rdt
,
3938 [RDT8
] = igb_set_rdt
,
3939 [RDT9
] = igb_set_rdt
,
3940 [RDT10
] = igb_set_rdt
,
3941 [RDT11
] = igb_set_rdt
,
3942 [RDT12
] = igb_set_rdt
,
3943 [RDT13
] = igb_set_rdt
,
3944 [RDT14
] = igb_set_rdt
,
3945 [RDT15
] = igb_set_rdt
,
3946 [IMC
] = igb_set_imc
,
3947 [IMS
] = igb_set_ims
,
3948 [ICR
] = igb_set_icr
,
3949 [EECD
] = igb_set_eecd
,
3950 [RCTL
] = igb_set_rx_control
,
3951 [CTRL
] = igb_set_ctrl
,
3952 [EERD
] = igb_set_eerd
,
3953 [TDFH
] = igb_set_13bit
,
3954 [TDFT
] = igb_set_13bit
,
3955 [TDFHS
] = igb_set_13bit
,
3956 [TDFTS
] = igb_set_13bit
,
3957 [TDFPC
] = igb_set_13bit
,
3958 [RDFH
] = igb_set_13bit
,
3959 [RDFT
] = igb_set_13bit
,
3960 [RDFHS
] = igb_set_13bit
,
3961 [RDFTS
] = igb_set_13bit
,
3962 [RDFPC
] = igb_set_13bit
,
3963 [GCR
] = igb_set_gcr
,
3964 [RXCSUM
] = igb_set_rxcsum
,
3965 [TDLEN0
] = igb_set_dlen
,
3966 [TDLEN1
] = igb_set_dlen
,
3967 [TDLEN2
] = igb_set_dlen
,
3968 [TDLEN3
] = igb_set_dlen
,
3969 [TDLEN4
] = igb_set_dlen
,
3970 [TDLEN5
] = igb_set_dlen
,
3971 [TDLEN6
] = igb_set_dlen
,
3972 [TDLEN7
] = igb_set_dlen
,
3973 [TDLEN8
] = igb_set_dlen
,
3974 [TDLEN9
] = igb_set_dlen
,
3975 [TDLEN10
] = igb_set_dlen
,
3976 [TDLEN11
] = igb_set_dlen
,
3977 [TDLEN12
] = igb_set_dlen
,
3978 [TDLEN13
] = igb_set_dlen
,
3979 [TDLEN14
] = igb_set_dlen
,
3980 [TDLEN15
] = igb_set_dlen
,
3981 [RDLEN0
] = igb_set_dlen
,
3982 [RDLEN1
] = igb_set_dlen
,
3983 [RDLEN2
] = igb_set_dlen
,
3984 [RDLEN3
] = igb_set_dlen
,
3985 [RDLEN4
] = igb_set_dlen
,
3986 [RDLEN5
] = igb_set_dlen
,
3987 [RDLEN6
] = igb_set_dlen
,
3988 [RDLEN7
] = igb_set_dlen
,
3989 [RDLEN8
] = igb_set_dlen
,
3990 [RDLEN9
] = igb_set_dlen
,
3991 [RDLEN10
] = igb_set_dlen
,
3992 [RDLEN11
] = igb_set_dlen
,
3993 [RDLEN12
] = igb_set_dlen
,
3994 [RDLEN13
] = igb_set_dlen
,
3995 [RDLEN14
] = igb_set_dlen
,
3996 [RDLEN15
] = igb_set_dlen
,
3997 [TDBAL0
] = igb_set_dbal
,
3998 [TDBAL1
] = igb_set_dbal
,
3999 [TDBAL2
] = igb_set_dbal
,
4000 [TDBAL3
] = igb_set_dbal
,
4001 [TDBAL4
] = igb_set_dbal
,
4002 [TDBAL5
] = igb_set_dbal
,
4003 [TDBAL6
] = igb_set_dbal
,
4004 [TDBAL7
] = igb_set_dbal
,
4005 [TDBAL8
] = igb_set_dbal
,
4006 [TDBAL9
] = igb_set_dbal
,
4007 [TDBAL10
] = igb_set_dbal
,
4008 [TDBAL11
] = igb_set_dbal
,
4009 [TDBAL12
] = igb_set_dbal
,
4010 [TDBAL13
] = igb_set_dbal
,
4011 [TDBAL14
] = igb_set_dbal
,
4012 [TDBAL15
] = igb_set_dbal
,
4013 [RDBAL0
] = igb_set_dbal
,
4014 [RDBAL1
] = igb_set_dbal
,
4015 [RDBAL2
] = igb_set_dbal
,
4016 [RDBAL3
] = igb_set_dbal
,
4017 [RDBAL4
] = igb_set_dbal
,
4018 [RDBAL5
] = igb_set_dbal
,
4019 [RDBAL6
] = igb_set_dbal
,
4020 [RDBAL7
] = igb_set_dbal
,
4021 [RDBAL8
] = igb_set_dbal
,
4022 [RDBAL9
] = igb_set_dbal
,
4023 [RDBAL10
] = igb_set_dbal
,
4024 [RDBAL11
] = igb_set_dbal
,
4025 [RDBAL12
] = igb_set_dbal
,
4026 [RDBAL13
] = igb_set_dbal
,
4027 [RDBAL14
] = igb_set_dbal
,
4028 [RDBAL15
] = igb_set_dbal
,
4029 [STATUS
] = igb_set_status
,
4030 [PBACLR
] = igb_set_pbaclr
,
4031 [CTRL_EXT
] = igb_set_ctrlext
,
4032 [FCAH
] = igb_set_16bit
,
4033 [FCT
] = igb_set_16bit
,
4034 [FCTTV
] = igb_set_16bit
,
4035 [FCRTV
] = igb_set_16bit
,
4036 [FCRTH
] = igb_set_fcrth
,
4037 [FCRTL
] = igb_set_fcrtl
,
4038 [CTRL_DUP
] = igb_set_ctrl
,
4039 [RFCTL
] = igb_set_rfctl
,
4040 [TIMINCA
] = igb_set_timinca
,
4041 [TIMADJH
] = igb_set_timadjh
,
4043 [IP6AT
... IP6AT
+ 3] = igb_mac_writereg
,
4044 [IP4AT
... IP4AT
+ 6] = igb_mac_writereg
,
4045 [RA
] = igb_mac_writereg
,
4046 [RA
+ 1] = igb_mac_setmacaddr
,
4047 [RA
+ 2 ... RA
+ 31] = igb_mac_writereg
,
4048 [RA2
... RA2
+ 31] = igb_mac_writereg
,
4049 [WUPM
... WUPM
+ 31] = igb_mac_writereg
,
4050 [MTA
... MTA
+ E1000_MC_TBL_SIZE
- 1] = igb_mac_writereg
,
4051 [VFTA
... VFTA
+ E1000_VLAN_FILTER_TBL_SIZE
- 1] = igb_mac_writereg
,
4052 [FFMT
... FFMT
+ 254] = igb_set_4bit
,
4053 [MDEF
... MDEF
+ 7] = igb_mac_writereg
,
4054 [FTFT
... FTFT
+ 254] = igb_mac_writereg
,
4055 [RETA
... RETA
+ 31] = igb_mac_writereg
,
4056 [RSSRK
... RSSRK
+ 9] = igb_mac_writereg
,
4057 [MAVTV0
... MAVTV3
] = igb_mac_writereg
,
4058 [EITR0
... EITR0
+ IGB_INTR_NUM
- 1] = igb_set_eitr
,
4061 [FWSM
] = igb_mac_writereg
,
4062 [SW_FW_SYNC
] = igb_mac_writereg
,
4063 [EICR
] = igb_set_eicr
,
4064 [EICS
] = igb_set_eics
,
4065 [EIAC
] = igb_set_eiac
,
4066 [EIAM
] = igb_set_eiam
,
4067 [EIMC
] = igb_set_eimc
,
4068 [EIMS
] = igb_set_eims
,
4069 [IVAR0
... IVAR0
+ 7] = igb_mac_writereg
,
4070 igb_putreg(IVAR_MISC
),
4071 igb_putreg(TSYNCRXCFG
),
4072 [ETQF0
... ETQF0
+ 7] = igb_mac_writereg
,
4074 [P2VMAILBOX0
... P2VMAILBOX7
] = igb_set_pfmailbox
,
4075 [V2PMAILBOX0
... V2PMAILBOX7
] = igb_set_vfmailbox
,
4076 [MBVFICR
] = igb_w1c
,
4077 [VMBMEM0
... VMBMEM0
+ 127] = igb_mac_writereg
,
4078 igb_putreg(MBVFIMR
),
4085 [VLVF0
... VLVF0
+ E1000_VLVF_ARRAY_SIZE
- 1] = igb_mac_writereg
,
4086 [VMVIR0
... VMVIR7
] = igb_mac_writereg
,
4087 [VMOLR0
... VMOLR7
] = igb_mac_writereg
,
4088 [UTA
... UTA
+ E1000_MC_TBL_SIZE
- 1] = igb_mac_writereg
,
4089 [PVTCTRL0
] = igb_set_vtctrl
,
4090 [PVTCTRL1
] = igb_set_vtctrl
,
4091 [PVTCTRL2
] = igb_set_vtctrl
,
4092 [PVTCTRL3
] = igb_set_vtctrl
,
4093 [PVTCTRL4
] = igb_set_vtctrl
,
4094 [PVTCTRL5
] = igb_set_vtctrl
,
4095 [PVTCTRL6
] = igb_set_vtctrl
,
4096 [PVTCTRL7
] = igb_set_vtctrl
,
4097 [PVTEICS0
] = igb_set_vteics
,
4098 [PVTEICS1
] = igb_set_vteics
,
4099 [PVTEICS2
] = igb_set_vteics
,
4100 [PVTEICS3
] = igb_set_vteics
,
4101 [PVTEICS4
] = igb_set_vteics
,
4102 [PVTEICS5
] = igb_set_vteics
,
4103 [PVTEICS6
] = igb_set_vteics
,
4104 [PVTEICS7
] = igb_set_vteics
,
4105 [PVTEIMS0
] = igb_set_vteims
,
4106 [PVTEIMS1
] = igb_set_vteims
,
4107 [PVTEIMS2
] = igb_set_vteims
,
4108 [PVTEIMS3
] = igb_set_vteims
,
4109 [PVTEIMS4
] = igb_set_vteims
,
4110 [PVTEIMS5
] = igb_set_vteims
,
4111 [PVTEIMS6
] = igb_set_vteims
,
4112 [PVTEIMS7
] = igb_set_vteims
,
4113 [PVTEIMC0
] = igb_set_vteimc
,
4114 [PVTEIMC1
] = igb_set_vteimc
,
4115 [PVTEIMC2
] = igb_set_vteimc
,
4116 [PVTEIMC3
] = igb_set_vteimc
,
4117 [PVTEIMC4
] = igb_set_vteimc
,
4118 [PVTEIMC5
] = igb_set_vteimc
,
4119 [PVTEIMC6
] = igb_set_vteimc
,
4120 [PVTEIMC7
] = igb_set_vteimc
,
4121 [PVTEIAC0
] = igb_set_vteiac
,
4122 [PVTEIAC1
] = igb_set_vteiac
,
4123 [PVTEIAC2
] = igb_set_vteiac
,
4124 [PVTEIAC3
] = igb_set_vteiac
,
4125 [PVTEIAC4
] = igb_set_vteiac
,
4126 [PVTEIAC5
] = igb_set_vteiac
,
4127 [PVTEIAC6
] = igb_set_vteiac
,
4128 [PVTEIAC7
] = igb_set_vteiac
,
4129 [PVTEIAM0
] = igb_set_vteiam
,
4130 [PVTEIAM1
] = igb_set_vteiam
,
4131 [PVTEIAM2
] = igb_set_vteiam
,
4132 [PVTEIAM3
] = igb_set_vteiam
,
4133 [PVTEIAM4
] = igb_set_vteiam
,
4134 [PVTEIAM5
] = igb_set_vteiam
,
4135 [PVTEIAM6
] = igb_set_vteiam
,
4136 [PVTEIAM7
] = igb_set_vteiam
,
4137 [PVTEICR0
] = igb_set_vteicr
,
4138 [PVTEICR1
] = igb_set_vteicr
,
4139 [PVTEICR2
] = igb_set_vteicr
,
4140 [PVTEICR3
] = igb_set_vteicr
,
4141 [PVTEICR4
] = igb_set_vteicr
,
4142 [PVTEICR5
] = igb_set_vteicr
,
4143 [PVTEICR6
] = igb_set_vteicr
,
4144 [PVTEICR7
] = igb_set_vteicr
,
4145 [VTIVAR
... VTIVAR
+ 7] = igb_set_vtivar
,
4146 [VTIVAR_MISC
... VTIVAR_MISC
+ 7] = igb_mac_writereg
4148 enum { IGB_NWRITEOPS
= ARRAY_SIZE(igb_macreg_writeops
) };
4150 enum { MAC_ACCESS_PARTIAL
= 1 };
/*
 * The array below combines alias offsets of the index values for the
 * MAC registers that have aliases, with the indication of not fully
 * implemented registers (lowest bit). This combination is possible
 * because all of the offsets are even.
 */
4158 static const uint16_t mac_reg_access
[E1000E_MAC_SIZE
] = {
4159 /* Alias index offsets */
4161 [RDFH_A
] = 0xe904, [RDFT_A
] = 0xe904,
4162 [TDFH_A
] = 0xed00, [TDFT_A
] = 0xed00,
4163 [RA_A
... RA_A
+ 31] = 0x14f0,
4164 [VFTA_A
... VFTA_A
+ E1000_VLAN_FILTER_TBL_SIZE
- 1] = 0x1400,
4166 [RDBAL0_A
] = 0x2600,
4167 [RDBAH0_A
] = 0x2600,
4168 [RDLEN0_A
] = 0x2600,
4169 [SRRCTL0_A
] = 0x2600,
4172 [RXDCTL0_A
] = 0x2600,
4173 [RXCTL0_A
] = 0x2600,
4174 [RQDPC0_A
] = 0x2600,
4175 [RDBAL1_A
] = 0x25D0,
4176 [RDBAL2_A
] = 0x25A0,
4177 [RDBAL3_A
] = 0x2570,
4178 [RDBAH1_A
] = 0x25D0,
4179 [RDBAH2_A
] = 0x25A0,
4180 [RDBAH3_A
] = 0x2570,
4181 [RDLEN1_A
] = 0x25D0,
4182 [RDLEN2_A
] = 0x25A0,
4183 [RDLEN3_A
] = 0x2570,
4184 [SRRCTL1_A
] = 0x25D0,
4185 [SRRCTL2_A
] = 0x25A0,
4186 [SRRCTL3_A
] = 0x2570,
4193 [RXDCTL1_A
] = 0x25D0,
4194 [RXDCTL2_A
] = 0x25A0,
4195 [RXDCTL3_A
] = 0x2570,
4196 [RXCTL1_A
] = 0x25D0,
4197 [RXCTL2_A
] = 0x25A0,
4198 [RXCTL3_A
] = 0x2570,
4199 [RQDPC1_A
] = 0x25D0,
4200 [RQDPC2_A
] = 0x25A0,
4201 [RQDPC3_A
] = 0x2570,
4202 [TDBAL0_A
] = 0x2A00,
4203 [TDBAH0_A
] = 0x2A00,
4204 [TDLEN0_A
] = 0x2A00,
4207 [TXCTL0_A
] = 0x2A00,
4208 [TDWBAL0_A
] = 0x2A00,
4209 [TDWBAH0_A
] = 0x2A00,
4210 [TDBAL1_A
] = 0x29D0,
4211 [TDBAL2_A
] = 0x29A0,
4212 [TDBAL3_A
] = 0x2970,
4213 [TDBAH1_A
] = 0x29D0,
4214 [TDBAH2_A
] = 0x29A0,
4215 [TDBAH3_A
] = 0x2970,
4216 [TDLEN1_A
] = 0x29D0,
4217 [TDLEN2_A
] = 0x29A0,
4218 [TDLEN3_A
] = 0x2970,
4225 [TXDCTL0_A
] = 0x2A00,
4226 [TXDCTL1_A
] = 0x29D0,
4227 [TXDCTL2_A
] = 0x29A0,
4228 [TXDCTL3_A
] = 0x2970,
4229 [TXCTL1_A
] = 0x29D0,
4230 [TXCTL2_A
] = 0x29A0,
4231 [TXCTL3_A
] = 0x29D0,
4232 [TDWBAL1_A
] = 0x29D0,
4233 [TDWBAL2_A
] = 0x29A0,
4234 [TDWBAL3_A
] = 0x2970,
4235 [TDWBAH1_A
] = 0x29D0,
4236 [TDWBAH2_A
] = 0x29A0,
4237 [TDWBAH3_A
] = 0x2970,
4239 /* Access options */
4240 [RDFH
] = MAC_ACCESS_PARTIAL
, [RDFT
] = MAC_ACCESS_PARTIAL
,
4241 [RDFHS
] = MAC_ACCESS_PARTIAL
, [RDFTS
] = MAC_ACCESS_PARTIAL
,
4242 [RDFPC
] = MAC_ACCESS_PARTIAL
,
4243 [TDFH
] = MAC_ACCESS_PARTIAL
, [TDFT
] = MAC_ACCESS_PARTIAL
,
4244 [TDFHS
] = MAC_ACCESS_PARTIAL
, [TDFTS
] = MAC_ACCESS_PARTIAL
,
4245 [TDFPC
] = MAC_ACCESS_PARTIAL
, [EECD
] = MAC_ACCESS_PARTIAL
,
4246 [FLA
] = MAC_ACCESS_PARTIAL
,
4247 [FCAL
] = MAC_ACCESS_PARTIAL
, [FCAH
] = MAC_ACCESS_PARTIAL
,
4248 [FCT
] = MAC_ACCESS_PARTIAL
, [FCTTV
] = MAC_ACCESS_PARTIAL
,
4249 [FCRTV
] = MAC_ACCESS_PARTIAL
, [FCRTL
] = MAC_ACCESS_PARTIAL
,
4250 [FCRTH
] = MAC_ACCESS_PARTIAL
,
4251 [MAVTV0
... MAVTV3
] = MAC_ACCESS_PARTIAL
4255 igb_core_write(IGBCore
*core
, hwaddr addr
, uint64_t val
, unsigned size
)
4257 uint16_t index
= igb_get_reg_index_with_offset(mac_reg_access
, addr
);
4259 if (index
< IGB_NWRITEOPS
&& igb_macreg_writeops
[index
]) {
4260 if (mac_reg_access
[index
] & MAC_ACCESS_PARTIAL
) {
4261 trace_e1000e_wrn_regs_write_trivial(index
<< 2);
4263 trace_e1000e_core_write(index
<< 2, size
, val
);
4264 igb_macreg_writeops
[index
](core
, index
, val
);
4265 } else if (index
< IGB_NREADOPS
&& igb_macreg_readops
[index
]) {
4266 trace_e1000e_wrn_regs_write_ro(index
<< 2, size
, val
);
4268 trace_e1000e_wrn_regs_write_unknown(index
<< 2, size
, val
);
4273 igb_core_read(IGBCore
*core
, hwaddr addr
, unsigned size
)
4276 uint16_t index
= igb_get_reg_index_with_offset(mac_reg_access
, addr
);
4278 if (index
< IGB_NREADOPS
&& igb_macreg_readops
[index
]) {
4279 if (mac_reg_access
[index
] & MAC_ACCESS_PARTIAL
) {
4280 trace_e1000e_wrn_regs_read_trivial(index
<< 2);
4282 val
= igb_macreg_readops
[index
](core
, index
);
4283 trace_e1000e_core_read(index
<< 2, size
, val
);
4286 trace_e1000e_wrn_regs_read_unknown(index
<< 2, size
);
4292 igb_autoneg_pause(IGBCore
*core
)
4294 timer_del(core
->autoneg_timer
);
4298 igb_autoneg_resume(IGBCore
*core
)
4300 if (igb_have_autoneg(core
) &&
4301 !(core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
)) {
4302 qemu_get_queue(core
->owner_nic
)->link_down
= false;
4303 timer_mod(core
->autoneg_timer
,
4304 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
) + 500);
4309 igb_vm_state_change(void *opaque
, bool running
, RunState state
)
4311 IGBCore
*core
= opaque
;
4314 trace_e1000e_vm_state_running();
4315 igb_intrmgr_resume(core
);
4316 igb_autoneg_resume(core
);
4318 trace_e1000e_vm_state_stopped();
4319 igb_autoneg_pause(core
);
4320 igb_intrmgr_pause(core
);
4325 igb_core_pci_realize(IGBCore
*core
,
4326 const uint16_t *eeprom_templ
,
4327 uint32_t eeprom_size
,
4328 const uint8_t *macaddr
)
4332 core
->autoneg_timer
= timer_new_ms(QEMU_CLOCK_VIRTUAL
,
4333 igb_autoneg_timer
, core
);
4334 igb_intrmgr_pci_realize(core
);
4336 core
->vmstate
= qemu_add_vm_change_state_handler(igb_vm_state_change
, core
);
4338 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
4339 net_tx_pkt_init(&core
->tx
[i
].tx_pkt
, E1000E_MAX_TX_FRAGS
);
4342 net_rx_pkt_init(&core
->rx_pkt
);
4344 e1000x_core_prepare_eeprom(core
->eeprom
,
4347 PCI_DEVICE_GET_CLASS(core
->owner
)->device_id
,
4349 igb_update_rx_offloads(core
);
4353 igb_core_pci_uninit(IGBCore
*core
)
4357 timer_free(core
->autoneg_timer
);
4359 igb_intrmgr_pci_unint(core
);
4361 qemu_del_vm_change_state_handler(core
->vmstate
);
4363 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
4364 net_tx_pkt_uninit(core
->tx
[i
].tx_pkt
);
4367 net_rx_pkt_uninit(core
->rx_pkt
);
4370 static const uint16_t
4371 igb_phy_reg_init
[] = {
4372 [MII_BMCR
] = MII_BMCR_SPEED1000
|
4376 [MII_BMSR
] = MII_BMSR_EXTCAP
|
4386 [MII_PHYID1
] = IGP03E1000_E_PHY_ID
>> 16,
4387 [MII_PHYID2
] = (IGP03E1000_E_PHY_ID
& 0xfff0) | 1,
4388 [MII_ANAR
] = MII_ANAR_CSMACD
| MII_ANAR_10
|
4389 MII_ANAR_10FD
| MII_ANAR_TX
|
4390 MII_ANAR_TXFD
| MII_ANAR_PAUSE
|
4391 MII_ANAR_PAUSE_ASYM
,
4392 [MII_ANLPAR
] = MII_ANLPAR_10
| MII_ANLPAR_10FD
|
4393 MII_ANLPAR_TX
| MII_ANLPAR_TXFD
|
4394 MII_ANLPAR_T4
| MII_ANLPAR_PAUSE
,
4395 [MII_ANER
] = MII_ANER_NP
| MII_ANER_NWAY
,
4396 [MII_ANNP
] = 0x1 | MII_ANNP_MP
,
4397 [MII_CTRL1000
] = MII_CTRL1000_HALF
| MII_CTRL1000_FULL
|
4398 MII_CTRL1000_PORT
| MII_CTRL1000_MASTER
,
4399 [MII_STAT1000
] = MII_STAT1000_HALF
| MII_STAT1000_FULL
|
4400 MII_STAT1000_ROK
| MII_STAT1000_LOK
,
4401 [MII_EXTSTAT
] = MII_EXTSTAT_1000T_HD
| MII_EXTSTAT_1000T_FD
,
4403 [IGP01E1000_PHY_PORT_CONFIG
] = BIT(5) | BIT(8),
4404 [IGP01E1000_PHY_PORT_STATUS
] = IGP01E1000_PSSR_SPEED_1000MBPS
,
4405 [IGP02E1000_PHY_POWER_MGMT
] = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU
|
4406 IGP01E1000_PSCFR_SMART_SPEED
4409 static const uint32_t igb_mac_reg_init
[] = {
4410 [LEDCTL
] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
4411 [EEMNGCTL
] = BIT(31),
4412 [TXDCTL0
] = E1000_TXDCTL_QUEUE_ENABLE
,
4413 [RXDCTL0
] = E1000_RXDCTL_QUEUE_ENABLE
| (1 << 16),
4414 [RXDCTL1
] = 1 << 16,
4415 [RXDCTL2
] = 1 << 16,
4416 [RXDCTL3
] = 1 << 16,
4417 [RXDCTL4
] = 1 << 16,
4418 [RXDCTL5
] = 1 << 16,
4419 [RXDCTL6
] = 1 << 16,
4420 [RXDCTL7
] = 1 << 16,
4421 [RXDCTL8
] = 1 << 16,
4422 [RXDCTL9
] = 1 << 16,
4423 [RXDCTL10
] = 1 << 16,
4424 [RXDCTL11
] = 1 << 16,
4425 [RXDCTL12
] = 1 << 16,
4426 [RXDCTL13
] = 1 << 16,
4427 [RXDCTL14
] = 1 << 16,
4428 [RXDCTL15
] = 1 << 16,
4429 [TIPG
] = 0x08 | (0x04 << 10) | (0x06 << 20),
4430 [CTRL
] = E1000_CTRL_FD
| E1000_CTRL_LRST
| E1000_CTRL_SPD_1000
|
4431 E1000_CTRL_ADVD3WUC
,
4432 [STATUS
] = E1000_STATUS_PHYRA
| BIT(31),
4433 [EECD
] = E1000_EECD_FWE_DIS
| E1000_EECD_PRES
|
4434 (2 << E1000_EECD_SIZE_EX_SHIFT
),
4435 [GCR
] = E1000_L0S_ADJUST
|
4436 E1000_GCR_CMPL_TMOUT_RESEND
|
4437 E1000_GCR_CAP_VER2
|
4438 E1000_L1_ENTRY_LATENCY_MSB
|
4439 E1000_L1_ENTRY_LATENCY_LSB
,
4440 [RXCSUM
] = E1000_RXCSUM_IPOFLD
| E1000_RXCSUM_TUOFLD
,
4443 [TCTL
] = E1000_TCTL_PSP
| (0xF << E1000_CT_SHIFT
) |
4444 (0x40 << E1000_COLD_SHIFT
) | (0x1 << 26) | (0xA << 28),
4445 [TCTL_EXT
] = 0x40 | (0x42 << 10),
4446 [DTXCTL
] = E1000_DTXCTL_8023LL
| E1000_DTXCTL_SPOOF_INT
,
4447 [VET
] = ETH_P_VLAN
| (ETH_P_VLAN
<< 16),
4449 [V2PMAILBOX0
... V2PMAILBOX0
+ IGB_MAX_VF_FUNCTIONS
- 1] = E1000_V2PMAILBOX_RSTI
,
4453 [VMOLR0
... VMOLR0
+ 7] = 0x2600 | E1000_VMOLR_STRCRC
,
4454 [RPLOLR
] = E1000_RPLOLR_STRCRC
,
4456 [TXCTL0
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4457 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4458 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4459 [TXCTL1
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4460 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4461 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4462 [TXCTL2
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4463 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4464 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4465 [TXCTL3
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4466 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4467 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4468 [TXCTL4
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4469 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4470 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4471 [TXCTL5
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4472 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4473 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4474 [TXCTL6
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4475 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4476 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4477 [TXCTL7
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4478 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4479 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4480 [TXCTL8
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4481 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4482 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4483 [TXCTL9
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4484 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4485 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4486 [TXCTL10
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4487 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4488 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4489 [TXCTL11
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4490 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4491 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4492 [TXCTL12
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4493 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4494 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4495 [TXCTL13
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4496 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4497 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4498 [TXCTL14
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4499 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4500 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4501 [TXCTL15
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
4502 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
4503 E1000_DCA_TXCTRL_DESC_RRO_EN
,
4506 static void igb_reset(IGBCore
*core
, bool sw
)
4511 timer_del(core
->autoneg_timer
);
4513 igb_intrmgr_reset(core
);
4515 memset(core
->phy
, 0, sizeof core
->phy
);
4516 memcpy(core
->phy
, igb_phy_reg_init
, sizeof igb_phy_reg_init
);
4518 for (i
= 0; i
< E1000E_MAC_SIZE
; i
++) {
4520 (i
== RXPBS
|| i
== TXPBS
||
4521 (i
>= EITR0
&& i
< EITR0
+ IGB_INTR_NUM
))) {
4525 core
->mac
[i
] = i
< ARRAY_SIZE(igb_mac_reg_init
) ?
4526 igb_mac_reg_init
[i
] : 0;
4529 if (qemu_get_queue(core
->owner_nic
)->link_down
) {
4530 igb_link_down(core
);
4533 e1000x_reset_mac_addr(core
->owner_nic
, core
->mac
, core
->permanent_mac
);
4535 for (int vfn
= 0; vfn
< IGB_MAX_VF_FUNCTIONS
; vfn
++) {
4536 /* Set RSTI, so VF can identify a PF reset is in progress */
4537 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_RSTI
;
4540 for (i
= 0; i
< ARRAY_SIZE(core
->tx
); i
++) {
4542 memset(tx
->ctx
, 0, sizeof(tx
->ctx
));
4544 tx
->skip_cp
= false;
4549 igb_core_reset(IGBCore
*core
)
4551 igb_reset(core
, false);
4554 void igb_core_pre_save(IGBCore
*core
)
4557 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
4560 * If link is down and auto-negotiation is supported and ongoing,
4561 * complete auto-negotiation immediately. This allows us to look
4562 * at MII_BMSR_AN_COMP to infer link status on load.
4564 if (nc
->link_down
&& igb_have_autoneg(core
)) {
4565 core
->phy
[MII_BMSR
] |= MII_BMSR_AN_COMP
;
4566 igb_update_flowctl_status(core
);
4569 for (i
= 0; i
< ARRAY_SIZE(core
->tx
); i
++) {
4570 if (net_tx_pkt_has_fragments(core
->tx
[i
].tx_pkt
)) {
4571 core
->tx
[i
].skip_cp
= true;
4577 igb_core_post_load(IGBCore
*core
)
4579 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
4582 * nc.link_down can't be migrated, so infer link_down according
4583 * to link status bit in core.mac[STATUS].
4585 nc
->link_down
= (core
->mac
[STATUS
] & E1000_STATUS_LU
) == 0;