/*
 * Core code for QEMU igb emulation
 *
 * Datasheet:
 * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
 *
 * Copyright (c) 2020-2023 Red Hat, Inc.
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Akihiko Odaki <akihiko.odaki@daynix.com>
 * Gal Hammer <gal.hammer@sap.com>
 * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "igb_common.h"
#include "e1000x_common.h"
#include "igb_core.h"

#include "trace.h"

#define E1000E_MAX_TX_FRAGS (64)
union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_adv_rx_desc adv;
};

typedef struct IGBTxPktVmdqCallbackContext {
    IGBCore *core;
    NetClientState *nc;
} IGBTxPktVmdqCallbackContext;

typedef struct L2Header {
    struct eth_header eth;
    struct vlan_header vlan[2];
} L2Header;

typedef struct PTP2 {
    uint8_t message_id_transport_specific;
    uint8_t version_ptp;
    uint16_t message_length;
    uint8_t subdomain_number;
    uint8_t reserved0;
    uint16_t flags;
    uint64_t correction;
    uint8_t reserved1[5];
    uint8_t source_communication_technology;
    uint32_t source_uuid_lo;
    uint16_t source_uuid_hi;
    uint16_t source_port_id;
    uint16_t sequence_id;
    uint8_t control;
    uint8_t log_message_period;
} PTP2;
static ssize_t
igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                     bool has_vnet, bool *external_tx);

static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes);
static void igb_reset(IGBCore *core, bool sw);

static inline void
igb_raise_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
igb_lower_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}
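
/*
 * Deliver the MSI-X vector that corresponds to an EICR cause bit.  Causes
 * that belong to a VF (each VF owns IGBVF_MSIX_VEC_NUM vectors) are routed
 * to that VF's MSI-X table; the remaining causes use one of the PF's
 * IGB_MSIX_VEC_NUM vectors.  Cause bits marked in EIAC are auto-cleared
 * from EICR once the vector has been notified.
 */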
static void igb_msix_notify(IGBCore *core, unsigned int cause)
{
    PCIDevice *dev = core->owner;
    uint16_t vfn;
    uint32_t effective_eiac;
    unsigned int vector;

    vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM;
    if (vfn < pcie_sriov_num_vfs(core->owner)) {
        dev = pcie_sriov_get_vf_at_index(core->owner, vfn);
        assert(dev);
        vector = (cause + 2) % IGBVF_MSIX_VEC_NUM;
    } else if (cause >= IGB_MSIX_VEC_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "igb: Tried to use vector unavailable for PF");
        return;
    } else {
        vector = cause;
    }

    msix_notify(dev, vector);

    trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]);
    effective_eiac = core->mac[EIAC] & BIT(cause);
    core->mac[EICR] &= ~effective_eiac;
}
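
/*
 * EITR-based interrupt moderation: each vector has a delay timer whose
 * interval is programmed through its EITR register.  While a timer is
 * running, MSI-X notifications for that vector are postponed; they fire
 * from the timer callback once the throttling window has expired.
 */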
static inline void
igb_intrmgr_rearm_timer(IGBIntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}

static void
igb_intmgr_timer_resume(IGBIntrDelayTimer *timer)
{
    if (timer->running) {
        igb_intrmgr_rearm_timer(timer);
    }
}

static void
igb_intmgr_timer_pause(IGBIntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
    }
}

static void
igb_intrmgr_on_msix_throttling_timer(void *opaque)
{
    IGBIntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    timer->running = false;

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    igb_msix_notify(timer->core, idx);
}

static void
igb_intrmgr_initialize_all_timers(IGBCore *core, bool create)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR0 + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    }

    if (!create) {
        return;
    }

    for (i = 0; i < IGB_INTR_NUM; i++) {
        core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           igb_intrmgr_on_msix_throttling_timer,
                                           &core->eitr[i]);
    }
}

static void
igb_intrmgr_resume(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        igb_intmgr_timer_resume(&core->eitr[i]);
    }
}

static void
igb_intrmgr_pause(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        igb_intmgr_timer_pause(&core->eitr[i]);
    }
}

static void
igb_intrmgr_reset(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}

static void
igb_intrmgr_pci_unint(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}

static void
igb_intrmgr_pci_realize(IGBCore *core)
{
    igb_intrmgr_initialize_all_timers(core, true);
}
static inline bool
igb_rx_csum_enabled(IGBCore *core)
{
    return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
}

static inline bool
igb_rx_use_legacy_descriptor(IGBCore *core)
{
    /*
     * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
     * descriptor.
     */
    return false;
}

typedef struct E1000ERingInfo {
    int dbah;
    int dbal;
    int dlen;
    int dh;
    int dt;
    int idx;
} E1000ERingInfo;

static uint32_t
igb_rx_queue_desctyp_get(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[E1000_SRRCTL(r->idx) >> 2] & E1000_SRRCTL_DESCTYPE_MASK;
}

static bool
igb_rx_use_ps_descriptor(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
    return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT ||
           desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
}

static inline bool
igb_rss_enabled(IGBCore *core)
{
    return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ &&
           !igb_rx_csum_enabled(core) &&
           !igb_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint32_t queue;
    uint32_t type;
} E1000E_RSSInfo;
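
/*
 * Pick the RSS hash function for a packet according to the per-protocol
 * enable bits in MRQC, preferring L4-based hashes over L3-based ones.
 * Returns E1000_MRQ_RSS_TYPE_NONE when no enabled hash type matches.
 */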
static uint32_t
igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(igb_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
            (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) {
            return E1000_MRQ_RSS_TYPE_IPV4UDP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * The following two traces must not be combined because the
         * resulting event would have 11 arguments in total, and some trace
         * backends (at least "ust") are limited to 10 arguments per event.
         * Events with more arguments fail to compile for such backends.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
            }

            if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
                (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) {
                return E1000_MRQ_RSS_TYPE_IPV6UDP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }
        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }
    }

    return E1000_MRQ_RSS_TYPE_NONE;
}
static uint32_t
igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(igb_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
        type = NetPktRssIpV6TcpEx;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4UDP:
        type = NetPktRssIpV4Udp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6UDP:
        type = NetPktRssIpV6Udp;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

static void
igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
                     E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

    if (tx || !igb_rss_enabled(core)) {
        info->enabled = false;
        info->hash = 0;
        info->queue = 0;
        info->type = 0;
        trace_e1000e_rx_rss_disabled();
        return;
    }

    info->enabled = true;

    info->type = igb_rss_get_hash_type(core, pkt);

    trace_e1000e_rx_rss_type(info->type);

    if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
        info->hash = 0;
        info->queue = 0;
        return;
    }

    info->hash = igb_rss_calc_hash(core, pkt, info);
    info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
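
/*
 * In VMDq mode, the per-pool VMVIR register can force insertion of a
 * default VLAN tag ("always" mode) or suppress tagging entirely ("never"
 * mode), overriding the VLE request carried by the transmit descriptor.
 */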
static void
igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx,
                   uint16_t vlan, bool insert_vlan)
{
    if (core->mac[MRQC] & 1) {
        uint16_t pool = qn % IGB_NUM_VM_POOLS;

        if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
            /* always insert default VLAN */
            insert_vlan = true;
            vlan = core->mac[VMVIR0 + pool] & 0xffff;
        } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
            insert_vlan = false;
        }
    }

    if (insert_vlan) {
        net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan,
                                        core->mac[VET] & 0xffff);
    }
}
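
/*
 * Apply the offloads requested by the packet's first data descriptor:
 * TSO (DCMD.TSE) builds a virtio-net header with the MSS taken from the
 * cached context descriptor, while the TXSM/IXSM olinfo bits request L4
 * and IP header checksum insertion respectively.
 */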
static bool
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
{
    uint32_t idx = (tx->first_olinfo_status >> 4) & 1;

    if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) {
        uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT;
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) {
            return false;
        }

        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return true;
    }

    if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) &&
        !((tx->ctx[idx].type_tucmd_mlhl & E1000_ADVTXD_TUCMD_L4T_SCTP) ?
          net_tx_pkt_update_sctp_checksum(tx->tx_pkt) :
          net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) {
        return false;
    }

    if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }

    return true;
}
static void igb_tx_pkt_mac_callback(void *core,
                                    const struct iovec *iov,
                                    int iovcnt,
                                    const struct iovec *virt_iov,
                                    int virt_iovcnt)
{
    igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL);
}

static void igb_tx_pkt_vmdq_callback(void *opaque,
                                     const struct iovec *iov,
                                     int iovcnt,
                                     const struct iovec *virt_iov,
                                     int virt_iovcnt)
{
    IGBTxPktVmdqCallbackContext *context = opaque;
    bool external_tx;

    igb_receive_internal(context->core, virt_iov, virt_iovcnt, true,
                         &external_tx);

    if (external_tx) {
        if (context->core->has_vnet) {
            qemu_sendv_packet(context->nc, virt_iov, virt_iovcnt);
        } else {
            qemu_sendv_packet(context->nc, iov, iovcnt);
        }
    }
}

/* TX Packets Switching (7.10.3.6) */
static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx,
                              NetClientState *nc)
{
    IGBTxPktVmdqCallbackContext context;

    /* TX switching is only used to serve VM to VM traffic. */
    if (!(core->mac[MRQC] & 1)) {
        goto send_out;
    }

    /* TX switching requires DTXSWC.Loopback_en bit enabled. */
    if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) {
        goto send_out;
    }

    context.core = core;
    context.nc = nc;

    return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                  igb_tx_pkt_vmdq_callback, &context);

send_out:
    return net_tx_pkt_send(tx->tx_pkt, nc);
}
static bool
igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index)
{
    int target_queue = MIN(core->max_queue_num, queue_index);
    NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);

    if (!igb_setup_tx_offloads(core, tx)) {
        return false;
    }

    net_tx_pkt_dump(tx->tx_pkt);

    if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) ||
        ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
        return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                      igb_tx_pkt_mac_callback, core);
    } else {
        return igb_tx_pkt_switch(core, tx, queue);
    }
}

static void
igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;

    e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
    e1000x_inc_reg_if_not_full(core->mac, TPT);
    e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);

    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPTC);
        break;
    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPTC);
        break;
    case ETH_PKT_UCAST:
        break;
    default:
        g_assert_not_reached();
    }

    e1000x_inc_reg_if_not_full(core->mac, GPTC);
    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);

    if (core->mac[MRQC] & 1) {
        uint16_t pool = qn % IGB_NUM_VM_POOLS;

        core->mac[PVFGOTC0 + (pool * 64)] += tot_len;
        core->mac[PVFGPTC0 + (pool * 64)]++;
    }
}
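
/*
 * Consume one advanced TX descriptor.  Context descriptors are cached in
 * tx->ctx[] for use by subsequent data descriptors; data descriptors
 * contribute their buffers to the packet under construction, and the EOP
 * descriptor triggers VLAN insertion, optional TX timestamping and the
 * actual send.
 */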
static void
igb_process_tx_desc(IGBCore *core,
                    PCIDevice *dev,
                    struct igb_tx *tx,
                    union e1000_adv_tx_desc *tx_desc,
                    int queue_index)
{
    struct e1000_adv_tx_context_desc *tx_ctx_desc;
    uint32_t cmd_type_len;
    uint32_t idx;
    uint64_t buffer_addr;
    uint16_t length;

    cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);

    if (cmd_type_len & E1000_ADVTXD_DCMD_DEXT) {
        if ((cmd_type_len & E1000_ADVTXD_DTYP_DATA) ==
            E1000_ADVTXD_DTYP_DATA) {
            /* advanced transmit data descriptor */
            if (tx->first) {
                tx->first_cmd_type_len = cmd_type_len;
                tx->first_olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
                tx->first = false;
            }
        } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) ==
                   E1000_ADVTXD_DTYP_CTXT) {
            /* advanced transmit context descriptor */
            tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc;
            idx = (le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 4) & 1;
            tx->ctx[idx].vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens);
            tx->ctx[idx].seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed);
            tx->ctx[idx].type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl);
            tx->ctx[idx].mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx);
            return;
        } else {
            /* unknown descriptor type */
            return;
        }
    } else {
        /* legacy descriptor */

        /* TODO: Implement support for legacy descriptors (7.2.2.1). */
    }

    buffer_addr = le64_to_cpu(tx_desc->read.buffer_addr);
    length = cmd_type_len & 0xFFFF;

    if (!tx->skip_cp) {
        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev,
                                             buffer_addr, length)) {
            tx->skip_cp = true;
        }
    }

    if (cmd_type_len & E1000_TXD_CMD_EOP) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            idx = (tx->first_olinfo_status >> 4) & 1;
            igb_tx_insert_vlan(core, queue_index, tx,
                tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT,
                !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE));

            if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) &&
                (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) &&
                !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) {
                core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID;
                e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH);
            }

            if (igb_tx_pkt_send(core, tx, queue_index)) {
                igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index);
            }
        }

        tx->first = true;
        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev);
    }
}
static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx)
{
    uint32_t n, ent = 0;

    n = igb_ivar_entry_tx(queue_idx);
    ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;

    return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
}

static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx)
{
    uint32_t n, ent = 0;

    n = igb_ivar_entry_rx(queue_idx);
    ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;

    return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
}

static inline bool
igb_ring_empty(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[r->dh] == core->mac[r->dt] ||
           core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}

static inline uint64_t
igb_ring_base(IGBCore *core, const E1000ERingInfo *r)
{
    uint64_t bah = core->mac[r->dbah];
    uint64_t bal = core->mac[r->dbal];

    return (bah << 32) + bal;
}

static inline uint64_t
igb_ring_head_descr(IGBCore *core, const E1000ERingInfo *r)
{
    return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}

static inline void
igb_ring_advance(IGBCore *core, const E1000ERingInfo *r, uint32_t count)
{
    core->mac[r->dh] += count;

    if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
        core->mac[r->dh] = 0;
    }
}

static inline uint32_t
igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r)
{
    trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
                                 core->mac[r->dh], core->mac[r->dt]);

    if (core->mac[r->dh] <= core->mac[r->dt]) {
        return core->mac[r->dt] - core->mac[r->dh];
    }

    if (core->mac[r->dh] > core->mac[r->dt]) {
        return core->mac[r->dlen] / E1000_RING_DESC_LEN +
               core->mac[r->dt] - core->mac[r->dh];
    }

    g_assert_not_reached();
    return 0;
}

static inline bool
igb_ring_enabled(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[r->dlen] > 0;
}
typedef struct IGB_TxRing_st {
    const E1000ERingInfo *i;
    struct igb_tx *tx;
} IGB_TxRing;

static inline int
igb_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    return (reg_idx - base_reg_idx) / 16;
}

static inline void
igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx)
{
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 },
        { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 },
        { TDBAH3, TDBAL3, TDLEN3, TDH3, TDT3, 3 },
        { TDBAH4, TDBAL4, TDLEN4, TDH4, TDT4, 4 },
        { TDBAH5, TDBAL5, TDLEN5, TDH5, TDT5, 5 },
        { TDBAH6, TDBAL6, TDLEN6, TDH6, TDT6, 6 },
        { TDBAH7, TDBAL7, TDLEN7, TDH7, TDT7, 7 },
        { TDBAH8, TDBAL8, TDLEN8, TDH8, TDT8, 8 },
        { TDBAH9, TDBAL9, TDLEN9, TDH9, TDT9, 9 },
        { TDBAH10, TDBAL10, TDLEN10, TDH10, TDT10, 10 },
        { TDBAH11, TDBAL11, TDLEN11, TDH11, TDT11, 11 },
        { TDBAH12, TDBAL12, TDLEN12, TDH12, TDT12, 12 },
        { TDBAH13, TDBAL13, TDLEN13, TDH13, TDT13, 13 },
        { TDBAH14, TDBAL14, TDLEN14, TDH14, TDT14, 14 },
        { TDBAH15, TDBAL15, TDLEN15, TDH15, TDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i = &i[idx];
    txr->tx = &core->tx[idx];
}

typedef struct E1000E_RxRing_st {
    const E1000ERingInfo *i;
} E1000E_RxRing;

static inline void
igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx)
{
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 },
        { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 },
        { RDBAH3, RDBAL3, RDLEN3, RDH3, RDT3, 3 },
        { RDBAH4, RDBAL4, RDLEN4, RDH4, RDT4, 4 },
        { RDBAH5, RDBAL5, RDLEN5, RDH5, RDT5, 5 },
        { RDBAH6, RDBAL6, RDLEN6, RDH6, RDT6, 6 },
        { RDBAH7, RDBAL7, RDLEN7, RDH7, RDT7, 7 },
        { RDBAH8, RDBAL8, RDLEN8, RDH8, RDT8, 8 },
        { RDBAH9, RDBAL9, RDLEN9, RDH9, RDT9, 9 },
        { RDBAH10, RDBAL10, RDLEN10, RDH10, RDT10, 10 },
        { RDBAH11, RDBAL11, RDLEN11, RDH11, RDT11, 11 },
        { RDBAH12, RDBAL12, RDLEN12, RDH12, RDT12, 12 },
        { RDBAH13, RDBAL13, RDLEN13, RDH13, RDT13, 13 },
        { RDBAH14, RDBAL14, RDLEN14, RDH14, RDT14, 14 },
        { RDBAH15, RDBAL15, RDLEN15, RDH15, RDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i = &i[idx];
}
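
/*
 * TX descriptor write-back, performed only when the RS (Report Status)
 * bit is set: either update the ring head value in guest memory (head
 * write-back, enabled via bit 0 of the TDWBAL/TDWBAH pair) or set the DD
 * bit in the descriptor itself, and return the EICR bit from the queue's
 * IVAR entry so the caller can raise the matching interrupt.
 */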
static uint32_t
igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
                     union e1000_adv_tx_desc *tx_desc,
                     const E1000ERingInfo *txi)
{
    PCIDevice *d;
    uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);
    uint64_t tdwba;

    tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2];
    tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32;

    if (!(cmd_type_len & E1000_TXD_CMD_RS)) {
        return 0;
    }

    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    if (tdwba & 1) {
        uint32_t buffer = cpu_to_le32(core->mac[txi->dh]);
        pci_dma_write(d, tdwba & ~3, &buffer, sizeof(buffer));
    } else {
        uint32_t status = le32_to_cpu(tx_desc->wb.status) | E1000_TXD_STAT_DD;

        tx_desc->wb.status = cpu_to_le32(status);
        pci_dma_write(d, base + offsetof(union e1000_adv_tx_desc, wb),
                      &tx_desc->wb, sizeof(tx_desc->wb));
    }

    return igb_tx_wb_eic(core, txi->idx);
}

static inline bool
igb_tx_enabled(IGBCore *core, const E1000ERingInfo *txi)
{
    bool vmdq = core->mac[MRQC] & 1;
    uint16_t qn = txi->idx;
    uint16_t pool = qn % IGB_NUM_VM_POOLS;

    return (core->mac[TCTL] & E1000_TCTL_EN) &&
           (!vmdq || core->mac[VFTE] & BIT(pool)) &&
           (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
}
static void
igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_adv_tx_desc desc;
    const E1000ERingInfo *txi = txr->i;
    uint32_t eic = 0;

    if (!igb_tx_enabled(core, txi)) {
        trace_e1000e_tx_disabled();
        return;
    }

    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    while (!igb_ring_empty(core, txi)) {
        base = igb_ring_head_descr(core, txi);

        pci_dma_read(d, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
                              desc.read.cmd_type_len, desc.wb.status);

        igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
        igb_ring_advance(core, txi, 1);
        eic |= igb_txdesc_writeback(core, base, &desc, txi);
    }

    if (eic) {
        igb_raise_interrupts(core, EICR, eic);
        igb_raise_interrupts(core, ICR, E1000_ICR_TXDW);
    }

    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);
}

static uint32_t
igb_rxbufsize(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
    uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK;
    if (bsizepkt) {
        return bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT;
    }

    return e1000x_rxbufsize(core->mac[RCTL]);
}

static bool
igb_has_rxbufs(IGBCore *core, const E1000ERingInfo *r, size_t total_size)
{
    uint32_t bufs = igb_ring_free_descr_num(core, r);
    uint32_t bufsize = igb_rxbufsize(core, r);

    trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, bufsize);

    return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
                         bufsize;
}

static uint32_t
igb_rxhdrbufsize(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
    return (srrctl & E1000_SRRCTL_BSIZEHDRSIZE_MASK) >>
           E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
}
void
igb_start_recv(IGBCore *core)
{
    int i;

    trace_e1000e_rx_start_recv();

    for (i = 0; i <= core->max_queue_num; i++) {
        qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
    }
}

bool
igb_can_receive(IGBCore *core)
{
    int i;

    if (!e1000x_rx_ready(core->owner, core->mac)) {
        return false;
    }

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        E1000E_RxRing rxr;
        if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
            continue;
        }

        igb_rx_ring_init(core, &rxr, i);
        if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) {
            trace_e1000e_rx_can_recv();
            return true;
        }
    }

    trace_e1000e_rx_can_recv_rings_full();
    return false;
}

ssize_t
igb_receive(IGBCore *core, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return igb_receive_iov(core, &iov, 1);
}

static inline bool
igb_rx_l3_cso_enabled(IGBCore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}

static inline bool
igb_rx_l4_cso_enabled(IGBCore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}
static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr,
                                size_t size, size_t vlan_num,
                                bool lpe, uint16_t rlpml)
{
    size_t vlan_header_size = sizeof(struct vlan_header) * vlan_num;
    size_t header_size = sizeof(struct eth_header) + vlan_header_size;
    return lpe ? size + ETH_FCS_LEN > rlpml : size > header_size + ETH_MTU;
}
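
/*
 * Decide which RX queues should receive the packet, returned as a bitmap.
 * The packet passes through the size check, the EtherType (ETQF) filters
 * with optional PTP timestamping, and the VLAN filter; then either the
 * VMDq path (per-pool MAC/VLAN matching plus optional RSS fan-out, per
 * Sec 8.26.1) or the plain RSS path selects the final queues.
 */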
static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov,
                                   size_t iovcnt, size_t iov_ofs,
                                   const L2Header *l2_header, size_t size,
                                   E1000E_RSSInfo *rss_info,
                                   uint16_t *etqf, bool *ts, bool *external_tx)
{
    static const int ta_shift[] = { 4, 3, 2, 0 };
    const struct eth_header *ehdr = &l2_header->eth;
    uint32_t f, ra[2], *macp, rctl = core->mac[RCTL];
    uint16_t queues = 0;
    uint16_t oversized = 0;
    size_t vlan_num = 0;
    PTP2 ptp2;
    bool lpe;
    uint16_t rlpml;
    int i;

    memset(rss_info, 0, sizeof(E1000E_RSSInfo));
    *ts = false;

    if (external_tx) {
        *external_tx = true;
    }

    if (core->mac[CTRL_EXT] & BIT(26)) {
        if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 &&
            be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 2;
        }
    } else {
        if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 1;
        }
    }

    lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE);
    rlpml = core->mac[RLPML];
    if (!(core->mac[RCTL] & E1000_RCTL_SBP) &&
        igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) {
        trace_e1000x_rx_oversized(size);
        return queues;
    }

    for (*etqf = 0; *etqf < 8; (*etqf)++) {
        if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) &&
            be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) {
            if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) &&
                (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) &&
                !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) &&
                iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) &&
                (ptp2.version_ptp & 15) == 2 &&
                ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) {
                e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH);
                *ts = true;
                core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID;
                core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo);
                core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) |
                                     (le16_to_cpu(ptp2.sequence_id) << 16);
            }
            break;
        }
    }

    if (vlan_num &&
        !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) {
        return queues;
    }

    if (core->mac[MRQC] & 1) {
        if (is_broadcast_ether_addr(ehdr->h_dest)) {
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) {
                    queues |= BIT(i);
                }
            }
        } else {
            for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            if (!queues) {
                macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA);

                f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
                f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff;
                if (macp[f >> 5] & (1 << (f & 0x1f))) {
                    for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                        if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) {
                            queues |= BIT(i);
                        }
                    }
                }
            } else if (is_unicast_ether_addr(ehdr->h_dest) && external_tx) {
                *external_tx = false;
            }
        }

        if (e1000x_vlan_rx_filter_enabled(core->mac)) {
            uint16_t mask = 0;

            if (vlan_num) {
                uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK;

                for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
                    if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid &&
                        (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) {
                        uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK;
                        mask |= poolsel >> E1000_VLVF_POOLSEL_SHIFT;
                    }
                }
            } else {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) {
                        mask |= BIT(i);
                    }
                }
            }

            queues &= mask;
        }

        if (is_unicast_ether_addr(ehdr->h_dest) && !queues && !external_tx &&
            !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) {
            uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK;
            queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
        }

        queues &= core->mac[VFRE];
        if (queues) {
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE);
                rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK;
                if ((queues & BIT(i)) &&
                    igb_rx_is_oversized(core, ehdr, size, vlan_num,
                                        lpe, rlpml)) {
                    oversized |= BIT(i);
                }
            }
            /* 8.19.37 increment ROC if packet is oversized for all queues */
            if (oversized == queues) {
                trace_e1000x_rx_oversized(size);
                e1000x_inc_reg_if_not_full(core->mac, ROC);
            }
            queues &= ~oversized;
        }

        if (queues) {
            igb_rss_parse_packet(core, core->rx_pkt,
                                 external_tx != NULL, rss_info);
            /* Sec 8.26.1: PQn = VFn + VQn*8 */
            if (rss_info->queue & 1) {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if ((queues & BIT(i)) &&
                        (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) {
                        queues |= BIT(i + IGB_NUM_VM_POOLS);
                        queues &= ~BIT(i);
                    }
                }
            }
        }
    } else {
        bool accepted = e1000x_rx_group_filter(core->mac, ehdr);
        if (!accepted) {
            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2,
                                                    MAC_ARG(ehdr->h_dest));
                    accepted = true;
                    break;
                }
            }
        }

        if (accepted) {
            igb_rss_parse_packet(core, core->rx_pkt, false, rss_info);
            queues = BIT(rss_info->queue);
        }
    }

    return queues;
}
static inline void
igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
                       hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->buffer_addr);
}

static inline void
igb_read_adv_rx_single_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                                 hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->read.pkt_addr);
}

static inline void
igb_read_adv_rx_split_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                                hwaddr *buff_addr)
{
    buff_addr[0] = le64_to_cpu(desc->read.hdr_addr);
    buff_addr[1] = le64_to_cpu(desc->read.pkt_addr);
}

typedef struct IGBBAState {
    uint16_t written[IGB_MAX_PS_BUFFERS];
    uint8_t cur_idx;
} IGBBAState;

typedef struct IGBSplitDescriptorData {
    bool sph;
    bool hbo;
    size_t hdr_len;
} IGBSplitDescriptorData;

typedef struct IGBPacketRxDMAState {
    size_t size;
    size_t total_size;
    size_t ps_hdr_len;
    size_t desc_size;
    size_t desc_offset;
    uint32_t rx_desc_packet_buf_size;
    uint32_t rx_desc_header_buf_size;
    struct iovec *iov;
    size_t iov_ofs;
    bool do_ps;
    bool is_first;
    IGBBAState bastate;
    hwaddr ba[IGB_MAX_PS_BUFFERS];
    IGBSplitDescriptorData ps_desc_data;
} IGBPacketRxDMAState;
static inline void
igb_read_rx_descr(IGBCore *core,
                  union e1000_rx_desc_union *desc,
                  IGBPacketRxDMAState *pdma_st,
                  const E1000ERingInfo *r)
{
    uint32_t desc_type;

    if (igb_rx_use_legacy_descriptor(core)) {
        igb_read_lgcy_rx_descr(core, &desc->legacy, &pdma_st->ba[1]);
        pdma_st->ba[0] = 0;
        return;
    }

    /* advanced header split descriptor */
    if (igb_rx_use_ps_descriptor(core, r)) {
        igb_read_adv_rx_split_buf_descr(core, &desc->adv, &pdma_st->ba[0]);
        return;
    }

    /* descriptor replication modes not supported */
    desc_type = igb_rx_queue_desctyp_get(core, r);
    if (desc_type != E1000_SRRCTL_DESCTYPE_ADV_ONEBUF) {
        trace_igb_wrn_rx_desc_modes_not_supp(desc_type);
    }

    /* advanced single buffer descriptor */
    igb_read_adv_rx_single_buf_descr(core, &desc->adv, &pdma_st->ba[1]);
    pdma_st->ba[0] = 0;
}
static void
igb_verify_csum_in_sw(IGBCore *core,
                      struct NetRxPkt *pkt,
                      uint32_t *status_flags,
                      EthL4HdrProto l4hdr_proto)
{
    bool csum_valid;
    uint32_t csum_error;

    if (igb_rx_l3_cso_enabled(core)) {
        if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
            trace_e1000e_rx_metadata_l3_csum_validation_failed();
        } else {
            csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
            *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
        }
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (!igb_rx_l4_cso_enabled(core)) {
        trace_e1000e_rx_metadata_l4_cso_disabled();
        return;
    }

    if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
        trace_e1000e_rx_metadata_l4_csum_validation_failed();
        return;
    }

    csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
    *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;

    if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *status_flags |= E1000_RXD_STAT_UDPCS;
    }
}

static void
igb_build_rx_metadata_common(IGBCore *core,
                             struct NetRxPkt *pkt,
                             bool is_eop,
                             uint32_t *status_flags,
                             uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool hasip4, hasip6, csum_valid;
    EthL4HdrProto l4hdr_proto;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* RX CSO information */
    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
        goto func_exit;
    }

    if (igb_rx_l3_cso_enabled(core)) {
        *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (igb_rx_l4_cso_enabled(core)) {
        switch (l4hdr_proto) {
        case ETH_L4_HDR_PROTO_SCTP:
            if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
                trace_e1000e_rx_metadata_l4_csum_validation_failed();
                goto func_exit;
            }
            if (!csum_valid) {
                *status_flags |= E1000_RXDEXT_STATERR_TCPE;
            }
            /* fall through */
        case ETH_L4_HDR_PROTO_TCP:
            *status_flags |= E1000_RXD_STAT_TCPCS;
            break;

        case ETH_L4_HDR_PROTO_UDP:
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
            break;

        default:
            break;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

func_exit:
    trace_e1000e_rx_metadata_status_flags(*status_flags);
    *status_flags = cpu_to_le32(*status_flags);
}
static inline void
igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
                        struct NetRxPkt *pkt,
                        const E1000E_RSSInfo *rss_info,
                        uint16_t length)
{
    uint32_t status_flags;

    assert(!rss_info->enabled);

    memset(desc, 0, sizeof(*desc));
    desc->length = cpu_to_le16(length);
    igb_build_rx_metadata_common(core, pkt, pkt != NULL,
                                 &status_flags,
                                 &desc->special);

    desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
    desc->status = (uint8_t) le32_to_cpu(status_flags);
}

static bool
igb_rx_ps_descriptor_split_always(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
    return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
}

static uint16_t
igb_rx_desc_get_packet_type(IGBCore *core, struct NetRxPkt *pkt, uint16_t etqf)
{
    uint16_t pkt_type;
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    if (etqf < 8) {
        pkt_type = BIT(11) | etqf;
        return pkt_type;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip6 && !(core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
        eth_ip6_hdr_info *ip6hdr_info = net_rx_pkt_get_ip6_info(pkt);
        pkt_type = ip6hdr_info->has_ext_hdrs ? E1000_ADVRXD_PKT_IP6E :
                                               E1000_ADVRXD_PKT_IP6;
    } else if (hasip4) {
        pkt_type = E1000_ADVRXD_PKT_IP4;
    } else {
        pkt_type = 0;
    }

    switch (l4hdr_proto) {
    case ETH_L4_HDR_PROTO_TCP:
        pkt_type |= E1000_ADVRXD_PKT_TCP;
        break;
    case ETH_L4_HDR_PROTO_UDP:
        pkt_type |= E1000_ADVRXD_PKT_UDP;
        break;
    case ETH_L4_HDR_PROTO_SCTP:
        pkt_type |= E1000_ADVRXD_PKT_SCTP;
        break;
    default:
        break;
    }

    return pkt_type;
}
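
/*
 * Fill the write-back half of an advanced RX descriptor: length, status
 * and error bits, VLAN tag, packet type, and either the RSS hash or the
 * IPv4 ID, depending on RXCSUM.PCSD.
 */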
static inline void
igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                       struct NetRxPkt *pkt,
                       const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts,
                       uint16_t length)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    uint16_t rss_type = 0, pkt_type;
    bool eop = (pkt != NULL);
    uint32_t adv_desc_status_error = 0;
    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.upper.length = cpu_to_le16(length);
    igb_build_rx_metadata_common(core, pkt, eop,
                                 &desc->wb.upper.status_error,
                                 &desc->wb.upper.vlan);

    if (!eop) {
        return;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            desc->wb.lower.hi_dword.rss = cpu_to_le32(rss_info->hash);
            rss_type = rss_info->type;
            trace_igb_rx_metadata_rss(desc->wb.lower.hi_dword.rss, rss_type);
        }
    } else if (hasip4) {
        adv_desc_status_error |= E1000_RXD_STAT_IPIDV;
        desc->wb.lower.hi_dword.csum_ip.ip_id =
            cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
        trace_e1000e_rx_metadata_ip_id(
            desc->wb.lower.hi_dword.csum_ip.ip_id);
    }

    if (ts) {
        adv_desc_status_error |= BIT(16);
    }

    pkt_type = igb_rx_desc_get_packet_type(core, pkt, etqf);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);
    desc->wb.lower.lo_dword.pkt_info = cpu_to_le16(rss_type | (pkt_type << 4));
    desc->wb.upper.status_error |= cpu_to_le32(adv_desc_status_error);
}

static inline void
igb_write_adv_ps_rx_descr(IGBCore *core,
                          union e1000_adv_rx_desc *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          const E1000ERingInfo *r,
                          uint16_t etqf,
                          bool ts,
                          IGBPacketRxDMAState *pdma_st)
{
    size_t pkt_len;
    uint16_t hdr_info = 0;

    if (pdma_st->do_ps) {
        pkt_len = pdma_st->bastate.written[1];
    } else {
        pkt_len = pdma_st->bastate.written[0] + pdma_st->bastate.written[1];
    }

    igb_write_adv_rx_descr(core, desc, pkt, rss_info, etqf, ts, pkt_len);

    hdr_info = (pdma_st->ps_desc_data.hdr_len << E1000_ADVRXD_HDR_LEN_OFFSET) &
               E1000_ADVRXD_ADV_HDR_LEN_MASK;
    hdr_info |= pdma_st->ps_desc_data.sph ? E1000_ADVRXD_HDR_SPH : 0;
    desc->wb.lower.lo_dword.hdr_info = cpu_to_le16(hdr_info);

    desc->wb.upper.status_error |= cpu_to_le32(
        pdma_st->ps_desc_data.hbo ? E1000_ADVRXD_ST_ERR_HBO_OFFSET : 0);
}
static inline void
igb_write_rx_descr(IGBCore *core,
                   union e1000_rx_desc_union *desc,
                   struct NetRxPkt *pkt,
                   const E1000E_RSSInfo *rss_info,
                   uint16_t etqf,
                   bool ts,
                   IGBPacketRxDMAState *pdma_st,
                   const E1000ERingInfo *r)
{
    if (igb_rx_use_legacy_descriptor(core)) {
        igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
                                pdma_st->bastate.written[1]);
    } else if (igb_rx_use_ps_descriptor(core, r)) {
        igb_write_adv_ps_rx_descr(core, &desc->adv, pkt, rss_info, r, etqf, ts,
                                  pdma_st);
    } else {
        igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info,
                               etqf, ts, pdma_st->bastate.written[1]);
    }
}

static inline void
igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr,
                          union e1000_rx_desc_union *desc, dma_addr_t len)
{
    if (igb_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = &desc->legacy;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        union e1000_adv_rx_desc *d = &desc->adv;
        size_t offset =
            offsetof(union e1000_adv_rx_desc, wb.upper.status_error);
        uint32_t status = d->wb.upper.status_error;

        d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->wb.upper.status_error = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    }
}
static void
igb_update_rx_stats(IGBCore *core, const E1000ERingInfo *rxi,
                    size_t pkt_size, size_t pkt_fcs_size)
{
    eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt);
    e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size);

    if (core->mac[MRQC] & 1) {
        uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;

        core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4;
        core->mac[PVFGPRC0 + (pool * 64)]++;
        if (pkt_type == ETH_PKT_MCAST) {
            core->mac[PVFMPRC0 + (pool * 64)]++;
        }
    }
}

static inline bool
igb_rx_descr_threshold_hit(IGBCore *core, const E1000ERingInfo *rxi)
{
    return igb_ring_free_descr_num(core, rxi) ==
           ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16;
}
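
/*
 * Decide whether header split should be performed for this packet and, if
 * so, compute the number of header bytes to place in the header buffer.
 * Returns false (no split) for single-buffer descriptor types, unparsable
 * L3 headers, disabled fragment splitting, or headers that exceed the
 * header buffer, unless the descriptor type mandates an always-split.
 */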
static bool
igb_do_ps(IGBCore *core,
          const E1000ERingInfo *r,
          struct NetRxPkt *pkt,
          IGBPacketRxDMAState *pdma_st)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    bool fragment;
    bool split_always;
    size_t bheader_size;
    size_t total_pkt_len;

    if (!igb_rx_use_ps_descriptor(core, r)) {
        return false;
    }

    total_pkt_len = net_rx_pkt_get_total_len(pkt);
    bheader_size = igb_rxhdrbufsize(core, r);
    split_always = igb_rx_ps_descriptor_split_always(core, r);
    if (split_always && total_pkt_len <= bheader_size) {
        pdma_st->ps_hdr_len = total_pkt_len;
        pdma_st->ps_desc_data.hdr_len = total_pkt_len;
        return true;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
    } else if (hasip6) {
        fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
    } else {
        pdma_st->ps_desc_data.hdr_len = bheader_size;
        goto header_not_handled;
    }

    if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
        pdma_st->ps_desc_data.hdr_len = bheader_size;
        goto header_not_handled;
    }

    /* no header splitting for SCTP */
    if (!fragment && (l4hdr_proto == ETH_L4_HDR_PROTO_UDP ||
                      l4hdr_proto == ETH_L4_HDR_PROTO_TCP)) {
        pdma_st->ps_hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
    } else {
        pdma_st->ps_hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
    }

    pdma_st->ps_desc_data.sph = true;
    pdma_st->ps_desc_data.hdr_len = pdma_st->ps_hdr_len;

    if (pdma_st->ps_hdr_len > bheader_size) {
        pdma_st->ps_desc_data.hbo = true;
        goto header_not_handled;
    }

    return true;

header_not_handled:
    if (split_always) {
        pdma_st->ps_hdr_len = bheader_size;
        return true;
    }

    return false;
}
static void
igb_truncate_to_descriptor_size(IGBPacketRxDMAState *pdma_st, size_t *size)
{
    if (pdma_st->do_ps && pdma_st->is_first) {
        if (*size > pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len) {
            *size = pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len;
        }
    } else {
        if (*size > pdma_st->rx_desc_packet_buf_size) {
            *size = pdma_st->rx_desc_packet_buf_size;
        }
    }
}

static inline void
igb_write_hdr_frag_to_rx_buffers(IGBCore *core,
                                 PCIDevice *d,
                                 IGBPacketRxDMAState *pdma_st,
                                 const char *data,
                                 dma_addr_t data_len)
{
    assert(data_len <= pdma_st->rx_desc_header_buf_size -
                       pdma_st->bastate.written[0]);
    pci_dma_write(d,
                  pdma_st->ba[0] + pdma_st->bastate.written[0],
                  data, data_len);
    pdma_st->bastate.written[0] += data_len;
    pdma_st->bastate.cur_idx = 1;
}

static void
igb_write_header_to_rx_buffers(IGBCore *core,
                               struct NetRxPkt *pkt,
                               PCIDevice *d,
                               IGBPacketRxDMAState *pdma_st,
                               size_t *copy_size)
{
    size_t iov_copy;
    size_t ps_hdr_copied = 0;

    if (!pdma_st->is_first) {
        /* Leave buffer 0 of each descriptor except the first empty */
        pdma_st->bastate.cur_idx = 1;
        return;
    }

    do {
        iov_copy = MIN(pdma_st->ps_hdr_len - ps_hdr_copied,
                       pdma_st->iov->iov_len - pdma_st->iov_ofs);

        igb_write_hdr_frag_to_rx_buffers(core, d, pdma_st,
                                         pdma_st->iov->iov_base,
                                         iov_copy);

        *copy_size -= iov_copy;
        ps_hdr_copied += iov_copy;

        pdma_st->iov_ofs += iov_copy;
        if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
            pdma_st->iov++;
            pdma_st->iov_ofs = 0;
        }
    } while (ps_hdr_copied < pdma_st->ps_hdr_len);

    pdma_st->is_first = false;
}

static void
igb_write_payload_frag_to_rx_buffers(IGBCore *core,
                                     PCIDevice *d,
                                     IGBPacketRxDMAState *pdma_st,
                                     const char *data,
                                     dma_addr_t data_len)
{
    while (data_len > 0) {
        assert(pdma_st->bastate.cur_idx < IGB_MAX_PS_BUFFERS);

        uint32_t cur_buf_bytes_left =
            pdma_st->rx_desc_packet_buf_size -
            pdma_st->bastate.written[pdma_st->bastate.cur_idx];
        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);

        trace_igb_rx_desc_buff_write(
            pdma_st->bastate.cur_idx,
            pdma_st->ba[pdma_st->bastate.cur_idx],
            pdma_st->bastate.written[pdma_st->bastate.cur_idx],
            data,
            bytes_to_write);

        pci_dma_write(d,
                      pdma_st->ba[pdma_st->bastate.cur_idx] +
                      pdma_st->bastate.written[pdma_st->bastate.cur_idx],
                      data, bytes_to_write);

        pdma_st->bastate.written[pdma_st->bastate.cur_idx] += bytes_to_write;
        data += bytes_to_write;
        data_len -= bytes_to_write;

        if (pdma_st->bastate.written[pdma_st->bastate.cur_idx] ==
            pdma_st->rx_desc_packet_buf_size) {
            pdma_st->bastate.cur_idx++;
        }
    }
}
static void
igb_write_payload_to_rx_buffers(IGBCore *core,
                                struct NetRxPkt *pkt,
                                PCIDevice *d,
                                IGBPacketRxDMAState *pdma_st,
                                size_t *copy_size)
{
    static const uint32_t fcs_pad;
    size_t iov_copy;

    /* Copy packet payload */
    while (*copy_size) {
        iov_copy = MIN(*copy_size, pdma_st->iov->iov_len - pdma_st->iov_ofs);
        igb_write_payload_frag_to_rx_buffers(core, d,
                                             pdma_st,
                                             pdma_st->iov->iov_base +
                                             pdma_st->iov_ofs,
                                             iov_copy);

        *copy_size -= iov_copy;
        pdma_st->iov_ofs += iov_copy;
        if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
            pdma_st->iov++;
            pdma_st->iov_ofs = 0;
        }
    }

    if (pdma_st->desc_offset + pdma_st->desc_size >= pdma_st->total_size) {
        /* Simulate FCS checksum presence in the last descriptor */
        igb_write_payload_frag_to_rx_buffers(core, d,
                                             pdma_st,
                                             (const char *) &fcs_pad,
                                             e1000x_fcs_len(core->mac));
    }
}

static void
igb_write_to_rx_buffers(IGBCore *core,
                        struct NetRxPkt *pkt,
                        PCIDevice *d,
                        IGBPacketRxDMAState *pdma_st)
{
    size_t copy_size;

    if (!(pdma_st->ba)[1] || (pdma_st->do_ps && !(pdma_st->ba[0]))) {
        /* as per intel docs; skip descriptors with null buf addr */
        trace_e1000e_rx_null_descriptor();
        return;
    }

    if (pdma_st->desc_offset >= pdma_st->size) {
        return;
    }

    pdma_st->desc_size = pdma_st->total_size - pdma_st->desc_offset;
    igb_truncate_to_descriptor_size(pdma_st, &pdma_st->desc_size);
    copy_size = pdma_st->size - pdma_st->desc_offset;
    igb_truncate_to_descriptor_size(pdma_st, &copy_size);

    /* For PS mode copy the packet header first */
    if (pdma_st->do_ps) {
        igb_write_header_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
    } else {
        pdma_st->bastate.cur_idx = 1;
    }

    igb_write_payload_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
}
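
/*
 * DMA the packet into guest memory, consuming one or more RX descriptors
 * from the ring and writing each one back with status, checksum and RSS
 * metadata; only the final descriptor of the packet carries EOP and the
 * full metadata.
 */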
static void
igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
                          const E1000E_RxRing *rxr,
                          const E1000E_RSSInfo *rss_info,
                          uint16_t etqf, bool ts)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_rx_desc_union desc;
    const E1000ERingInfo *rxi;
    size_t rx_desc_len;

    IGBPacketRxDMAState pdma_st = {0};
    pdma_st.is_first = true;
    pdma_st.size = net_rx_pkt_get_total_len(pkt);
    pdma_st.total_size = pdma_st.size + e1000x_fcs_len(core->mac);

    rxi = rxr->i;
    rx_desc_len = core->rx_desc_len;
    pdma_st.rx_desc_packet_buf_size = igb_rxbufsize(core, rxi);
    pdma_st.rx_desc_header_buf_size = igb_rxhdrbufsize(core, rxi);
    pdma_st.iov = net_rx_pkt_get_iovec(pkt);
    d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    pdma_st.do_ps = igb_do_ps(core, rxi, pkt, &pdma_st);

    do {
        memset(&pdma_st.bastate, 0, sizeof(IGBBAState));
        bool is_last = false;

        if (igb_ring_empty(core, rxi)) {
            return;
        }

        base = igb_ring_head_descr(core, rxi);
        pci_dma_read(d, base, &desc, rx_desc_len);
        trace_e1000e_rx_descr(rxi->idx, base, rx_desc_len);

        igb_read_rx_descr(core, &desc, &pdma_st, rxi);

        igb_write_to_rx_buffers(core, pkt, d, &pdma_st);
        pdma_st.desc_offset += pdma_st.desc_size;
        if (pdma_st.desc_offset >= pdma_st.total_size) {
            is_last = true;
        }

        igb_write_rx_descr(core, &desc,
                           is_last ? pkt : NULL,
                           rss_info,
                           etqf, ts,
                           &pdma_st,
                           rxi);
        igb_pci_dma_write_rx_desc(core, d, base, &desc, rx_desc_len);
        igb_ring_advance(core, rxi, rx_desc_len / E1000_MIN_RX_DESC_LEN);
    } while (pdma_st.desc_offset < pdma_st.total_size);

    igb_update_rx_stats(core, rxi, pdma_st.size, pdma_st.total_size);
}
static bool
igb_rx_strip_vlan(IGBCore *core, const E1000ERingInfo *rxi)
{
    if (core->mac[MRQC] & 1) {
        uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
        /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
        return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
                core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
                core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
    }

    return e1000x_vlan_enabled(core->mac);
}

static inline void
igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
{
    struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);

    if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_rx_pkt_fix_l4_csum(pkt);
    }
}

ssize_t
igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt)
{
    return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL);
}
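
/*
 * Main RX entry point: strips the optional virtio-net header, pads the
 * frame to the minimum Ethernet length, runs queue assignment, and then
 * copies the packet into every selected and enabled queue, accumulating
 * ICR/EICR causes that are raised as interrupts at the end.
 */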
2004 static ssize_t
2005 igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
2006 bool has_vnet, bool *external_tx)
2008 uint16_t queues = 0;
2009 uint32_t causes = 0;
2010 uint32_t ecauses = 0;
2011 union {
2012 L2Header l2_header;
2013 uint8_t octets[ETH_ZLEN];
2014 } buf;
2015 struct iovec min_iov;
2016 size_t size, orig_size;
2017 size_t iov_ofs = 0;
2018 E1000E_RxRing rxr;
2019 E1000E_RSSInfo rss_info;
2020 uint16_t etqf;
2021 bool ts;
2022 size_t total_size;
2023 int strip_vlan_index;
2024 int i;
2026 trace_e1000e_rx_receive_iov(iovcnt);
2028 if (external_tx) {
2029 *external_tx = true;
2032 if (!e1000x_hw_rx_enabled(core->mac)) {
2033 return -1;
2036 /* Pull virtio header in */
2037 if (has_vnet) {
2038 net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
2039 iov_ofs = sizeof(struct virtio_net_hdr);
2040 } else {
2041 net_rx_pkt_unset_vhdr(core->rx_pkt);
2044 orig_size = iov_size(iov, iovcnt);
2045 size = orig_size - iov_ofs;
2047 /* Pad to minimum Ethernet frame length */
2048 if (size < sizeof(buf)) {
2049 iov_to_buf(iov, iovcnt, iov_ofs, &buf, size);
2050 memset(&buf.octets[size], 0, sizeof(buf) - size);
2051 e1000x_inc_reg_if_not_full(core->mac, RUC);
2052 min_iov.iov_base = &buf;
2053 min_iov.iov_len = size = sizeof(buf);
2054 iovcnt = 1;
2055 iov = &min_iov;
2056 iov_ofs = 0;
2057 } else {
2058 iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header));
2061 net_rx_pkt_set_packet_type(core->rx_pkt,
2062 get_eth_packet_type(&buf.l2_header.eth));
2063 net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs);
2065 queues = igb_receive_assign(core, iov, iovcnt, iov_ofs,
2066 &buf.l2_header, size,
2067 &rss_info, &etqf, &ts, external_tx);
2068 if (!queues) {
2069 trace_e1000e_rx_flt_dropped();
2070 return orig_size;
2073 for (i = 0; i < IGB_NUM_QUEUES; i++) {
2074 if (!(queues & BIT(i)) ||
2075 !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
2076 continue;
2079 igb_rx_ring_init(core, &rxr, i);
2081 if (!igb_rx_strip_vlan(core, rxr.i)) {
2082 strip_vlan_index = -1;
2083 } else if (core->mac[CTRL_EXT] & BIT(26)) {
2084 strip_vlan_index = 1;
2085 } else {
2086 strip_vlan_index = 0;
2089 net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
2090 strip_vlan_index,
2091 core->mac[VET] & 0xffff,
2092 core->mac[VET] >> 16);
2094 total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
2095 e1000x_fcs_len(core->mac);
2097 if (!igb_has_rxbufs(core, rxr.i, total_size)) {
2098 causes |= E1000_ICS_RXO;
2099 trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
2100 continue;
2103 causes |= E1000_ICR_RXDW;
2105 igb_rx_fix_l4_csum(core, core->rx_pkt);
2106 igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts);
2108 /* Check if receive descriptor minimum threshold hit */
2109 if (igb_rx_descr_threshold_hit(core, rxr.i)) {
2110 causes |= E1000_ICS_RXDMT0;
2113 ecauses |= igb_rx_wb_eic(core, rxr.i->idx);
2115 trace_e1000e_rx_written_to_guest(rxr.i->idx);
2118 trace_e1000e_rx_interrupt_set(causes);
2119 igb_raise_interrupts(core, EICR, ecauses);
2120 igb_raise_interrupts(core, ICR, causes);
2122 return orig_size;
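/*
 * Note on the queue loop above: legacy causes (RXDW, RXO, RXDMT0) are
 * accumulated across all matched queues and raised once through ICR,
 * while the per-queue write-back causes are collected with
 * igb_rx_wb_eic() and raised through EICR.
 */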
2125 static inline bool
2126 igb_have_autoneg(IGBCore *core)
2128 return core->phy[MII_BMCR] & MII_BMCR_AUTOEN;
2131 static void igb_update_flowctl_status(IGBCore *core)
2133 if (igb_have_autoneg(core) && core->phy[MII_BMSR] & MII_BMSR_AN_COMP) {
2134 trace_e1000e_link_autoneg_flowctl(true);
2135 core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
2136 } else {
2137 trace_e1000e_link_autoneg_flowctl(false);
2141 static inline void
2142 igb_link_down(IGBCore *core)
2144 e1000x_update_regs_on_link_down(core->mac, core->phy);
2145 igb_update_flowctl_status(core);
2148 static inline void
2149 igb_set_phy_ctrl(IGBCore *core, uint16_t val)
2151 /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self-clearing */
2152 core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART);
2154 if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) {
2155 e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer);
2159 void igb_core_set_link_status(IGBCore *core)
2161 NetClientState *nc = qemu_get_queue(core->owner_nic);
2162 uint32_t old_status = core->mac[STATUS];
2164 trace_e1000e_link_status_changed(!nc->link_down);
2166 if (nc->link_down) {
2167 e1000x_update_regs_on_link_down(core->mac, core->phy);
2168 } else {
2169 if (igb_have_autoneg(core) &&
2170 !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
2171 e1000x_restart_autoneg(core->mac, core->phy,
2172 core->autoneg_timer);
2173 } else {
2174 e1000x_update_regs_on_link_up(core->mac, core->phy);
2175 igb_start_recv(core);
2179 if (core->mac[STATUS] != old_status) {
2180 igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
2184 static void
2185 igb_set_ctrl(IGBCore *core, int index, uint32_t val)
2187 trace_e1000e_core_ctrl_write(index, val);
2190 /* RST is self-clearing */
2190 core->mac[CTRL] = val & ~E1000_CTRL_RST;
2191 core->mac[CTRL_DUP] = core->mac[CTRL];
2193 trace_e1000e_link_set_params(
2194 !!(val & E1000_CTRL_ASDE),
2195 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
2196 !!(val & E1000_CTRL_FRCSPD),
2197 !!(val & E1000_CTRL_FRCDPX),
2198 !!(val & E1000_CTRL_RFCE),
2199 !!(val & E1000_CTRL_TFCE));
2201 if (val & E1000_CTRL_RST) {
2202 trace_e1000e_core_ctrl_sw_reset();
2203 igb_reset(core, true);
2206 if (val & E1000_CTRL_PHY_RST) {
2207 trace_e1000e_core_ctrl_phy_reset();
2208 core->mac[STATUS] |= E1000_STATUS_PHYRA;
2212 static void
2213 igb_set_rfctl(IGBCore *core, int index, uint32_t val)
2215 trace_e1000e_rx_set_rfctl(val);
2217 if (!(val & E1000_RFCTL_ISCSI_DIS)) {
2218 trace_e1000e_wrn_iscsi_filtering_not_supported();
2221 if (!(val & E1000_RFCTL_NFSW_DIS)) {
2222 trace_e1000e_wrn_nfsw_filtering_not_supported();
2225 if (!(val & E1000_RFCTL_NFSR_DIS)) {
2226 trace_e1000e_wrn_nfsr_filtering_not_supported();
2229 core->mac[RFCTL] = val;
2232 static void
2233 igb_calc_rxdesclen(IGBCore *core)
2235 if (igb_rx_use_legacy_descriptor(core)) {
2236 core->rx_desc_len = sizeof(struct e1000_rx_desc);
2237 } else {
2238 core->rx_desc_len = sizeof(union e1000_adv_rx_desc);
2240 trace_e1000e_rx_desc_len(core->rx_desc_len);
2243 static void
2244 igb_set_rx_control(IGBCore *core, int index, uint32_t val)
2246 core->mac[RCTL] = val;
2247 trace_e1000e_rx_set_rctl(core->mac[RCTL]);
2249 if (val & E1000_RCTL_DTYP_MASK) {
2250 qemu_log_mask(LOG_GUEST_ERROR,
2251 "igb: RCTL.DTYP must be zero for compatibility");
2254 if (val & E1000_RCTL_EN) {
2255 igb_calc_rxdesclen(core);
2256 igb_start_recv(core);
2260 static inline bool
2261 igb_postpone_interrupt(IGBIntrDelayTimer *timer)
2263 if (timer->running) {
2264 trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
2266 return true;
2269 if (timer->core->mac[timer->delay_reg] != 0) {
2270 igb_intrmgr_rearm_timer(timer);
2273 return false;
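/*
 * Throttling flow implied above: the first cause for a vector is
 * delivered immediately and, if the corresponding EITR register is
 * non-zero, arms the delay timer; causes arriving while the timer is
 * running return true here and are left for the timer callback to
 * deliver once the interval expires.
 */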
2276 static inline bool
2277 igb_eitr_should_postpone(IGBCore *core, int idx)
2279 return igb_postpone_interrupt(&core->eitr[idx]);
2282 static void igb_send_msix(IGBCore *core, uint32_t causes)
2284 int vector;
2286 for (vector = 0; vector < IGB_INTR_NUM; ++vector) {
2287 if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) {
2289 trace_e1000e_irq_msix_notify_vec(vector);
2290 igb_msix_notify(core, vector);
2295 static inline void
2296 igb_fix_icr_asserted(IGBCore *core)
2298 core->mac[ICR] &= ~E1000_ICR_ASSERTED;
2299 if (core->mac[ICR]) {
2300 core->mac[ICR] |= E1000_ICR_ASSERTED;
2303 trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
2306 static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes)
2308 uint32_t old_causes = core->mac[ICR] & core->mac[IMS];
2309 uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS];
2310 uint32_t raised_causes;
2311 uint32_t raised_ecauses;
2312 uint32_t int_alloc;
2314 trace_e1000e_irq_set(index << 2,
2315 core->mac[index], core->mac[index] | causes);
2317 core->mac[index] |= causes;
2319 if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) {
2320 raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
2322 if (raised_causes & E1000_ICR_DRSTA) {
2323 int_alloc = core->mac[IVAR_MISC] & 0xff;
2324 if (int_alloc & E1000_IVAR_VALID) {
2325 core->mac[EICR] |= BIT(int_alloc & 0x1f);
2328 /* Check if other bits (excluding the TCP Timer) are enabled. */
2329 if (raised_causes & ~E1000_ICR_DRSTA) {
2330 int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff;
2331 if (int_alloc & E1000_IVAR_VALID) {
2332 core->mac[EICR] |= BIT(int_alloc & 0x1f);
2336 raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses;
2337 if (!raised_ecauses) {
2338 return;
2341 igb_send_msix(core, raised_ecauses);
2342 } else {
2343 igb_fix_icr_asserted(core);
2345 raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
2346 if (!raised_causes) {
2347 return;
2350 core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER;
2352 if (msix_enabled(core->owner)) {
2353 trace_e1000e_irq_msix_notify_vec(0);
2354 msix_notify(core->owner, 0);
2355 } else if (msi_enabled(core->owner)) {
2356 trace_e1000e_irq_msi_notify(raised_causes);
2357 msi_notify(core->owner, 0);
2358 } else {
2359 igb_raise_legacy_irq(core);
2364 static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes)
2366 trace_e1000e_irq_clear(index << 2,
2367 core->mac[index], core->mac[index] & ~causes);
2369 core->mac[index] &= ~causes;
2371 trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
2372 core->mac[ICR], core->mac[IMS]);
2374 if (!(core->mac[ICR] & core->mac[IMS]) &&
2375 !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) {
2376 core->mac[EICR] &= ~E1000_EICR_OTHER;
2378 if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) {
2379 igb_lower_legacy_irq(core);
2384 static void igb_set_eics(IGBCore *core, int index, uint32_t val)
2386 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2387 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2389 trace_igb_irq_write_eics(val, msix);
2390 igb_raise_interrupts(core, EICR, val & mask);
2393 static void igb_set_eims(IGBCore *core, int index, uint32_t val)
2395 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2396 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2398 trace_igb_irq_write_eims(val, msix);
2399 igb_raise_interrupts(core, EIMS, val & mask);
2402 static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn)
2404 uint32_t ent = core->mac[VTIVAR_MISC + vfn];
2405 uint32_t causes;
2407 if ((ent & E1000_IVAR_VALID)) {
2408 causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM);
2409 igb_raise_interrupts(core, EICR, causes);
2413 static void mailbox_interrupt_to_pf(IGBCore *core)
2415 igb_raise_interrupts(core, ICR, E1000_ICR_VMMB);
2418 static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val)
2420 uint16_t vfn = index - P2VMAILBOX0;
2422 trace_igb_set_pfmailbox(vfn, val);
2424 if (val & E1000_P2VMAILBOX_STS) {
2425 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS;
2426 mailbox_interrupt_to_vf(core, vfn);
2429 if (val & E1000_P2VMAILBOX_ACK) {
2430 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK;
2431 mailbox_interrupt_to_vf(core, vfn);
2434 /* Buffer Taken by PF (can be set only if the VFU is cleared). */
2435 if (val & E1000_P2VMAILBOX_PFU) {
2436 if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) {
2437 core->mac[index] |= E1000_P2VMAILBOX_PFU;
2438 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU;
2440 } else {
2441 core->mac[index] &= ~E1000_P2VMAILBOX_PFU;
2442 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU;
2445 if (val & E1000_P2VMAILBOX_RVFU) {
2446 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU;
2447 core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) |
2448 (E1000_MBVFICR_VFREQ_VF1 << vfn));
2452 static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
2454 uint16_t vfn = index - V2PMAILBOX0;
2456 trace_igb_set_vfmailbox(vfn, val);
2458 if (val & E1000_V2PMAILBOX_REQ) {
2459 core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn;
2460 mailbox_interrupt_to_pf(core);
2463 if (val & E1000_V2PMAILBOX_ACK) {
2464 core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn;
2465 mailbox_interrupt_to_pf(core);
2468 /* Buffer Taken by VF (can be set only if the PFU is cleared). */
2469 if (val & E1000_V2PMAILBOX_VFU) {
2470 if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) {
2471 core->mac[index] |= E1000_V2PMAILBOX_VFU;
2472 core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU;
2474 } else {
2475 core->mac[index] &= ~E1000_V2PMAILBOX_VFU;
2476 core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU;
2480 static void igb_vf_reset(IGBCore *core, uint16_t vfn)
2482 uint16_t qn0 = vfn;
2483 uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;
2485 /* disable Rx and Tx for the VF */
2486 core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
2487 core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
2488 core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
2489 core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
2490 core->mac[VFRE] &= ~BIT(vfn);
2491 core->mac[VFTE] &= ~BIT(vfn);
2492 /* indicate VF reset to PF */
2493 core->mac[VFLRE] |= BIT(vfn);
2494 /* VFLRE and mailbox use the same interrupt cause */
2495 mailbox_interrupt_to_pf(core);
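/*
 * Queue ownership implied by qn0/qn1 above: VF n owns queues n and
 * n + IGB_NUM_VM_POOLS (8 pools on this device), so resetting VF 2,
 * for example, disables RX/TX on queues 2 and 10.
 */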
2498 static void igb_w1c(IGBCore *core, int index, uint32_t val)
2500 core->mac[index] &= ~val;
2503 static void igb_set_eimc(IGBCore *core, int index, uint32_t val)
2505 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2506 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2508 trace_igb_irq_write_eimc(val, msix);
2510 /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
2511 igb_lower_interrupts(core, EIMS, val & mask);
2514 static void igb_set_eiac(IGBCore *core, int index, uint32_t val)
2516 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2518 if (msix) {
2519 trace_igb_irq_write_eiac(val);
2522 /* TODO: When using IOV, the bits that correspond to MSI-X vectors
2523 * that are assigned to a VF are read-only. */
2525 core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK);
2529 static void igb_set_eiam(IGBCore *core, int index, uint32_t val)
2531 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2534 /* TODO: When using IOV, the bits that correspond to MSI-X vectors that
2535 * are assigned to a VF are read-only. */
2537 core->mac[EIAM] |=
2538 ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));
2540 trace_igb_irq_write_eiam(val, msix);
2543 static void igb_set_eicr(IGBCore *core, int index, uint32_t val)
2545 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2548 /* TODO: In IOV mode, only bit zero of this vector is available for the PF
2549 * function. */
2551 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2553 trace_igb_irq_write_eicr(val, msix);
2554 igb_lower_interrupts(core, EICR, val & mask);
2557 static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
2559 uint16_t vfn;
2561 if (val & E1000_CTRL_RST) {
2562 vfn = (index - PVTCTRL0) / 0x40;
2563 igb_vf_reset(core, vfn);
2567 static void igb_set_vteics(IGBCore *core, int index, uint32_t val)
2569 uint16_t vfn = (index - PVTEICS0) / 0x40;
2571 core->mac[index] = val;
2572 igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
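/*
 * Worked example for the shift used by these PVT* handlers: each VF
 * gets IGBVF_MSIX_VEC_NUM (3 here) MSI-X vectors packed into the upper
 * bits of the PF's EICR, so a 3-bit cause mask written by VF n lands
 * at bit 22 - 3 * n: VF 0 maps to EICR bits 22..24, VF 1 to bits
 * 19..21, ..., VF 7 to bits 1..3.
 */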
2575 static void igb_set_vteims(IGBCore *core, int index, uint32_t val)
2577 uint16_t vfn = (index - PVTEIMS0) / 0x40;
2579 core->mac[index] = val;
2580 igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2583 static void igb_set_vteimc(IGBCore *core, int index, uint32_t val)
2585 uint16_t vfn = (index - PVTEIMC0) / 0x40;
2587 core->mac[index] = val;
2588 igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2591 static void igb_set_vteiac(IGBCore *core, int index, uint32_t val)
2593 uint16_t vfn = (index - PVTEIAC0) / 0x40;
2595 core->mac[index] = val;
2596 igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2599 static void igb_set_vteiam(IGBCore *core, int index, uint32_t val)
2601 uint16_t vfn = (index - PVTEIAM0) / 0x40;
2603 core->mac[index] = val;
2604 igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2607 static void igb_set_vteicr(IGBCore *core, int index, uint32_t val)
2609 uint16_t vfn = (index - PVTEICR0) / 0x40;
2611 core->mac[index] = val;
2612 igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2615 static void igb_set_vtivar(IGBCore *core, int index, uint32_t val)
2617 uint16_t vfn = (index - VTIVAR);
2618 uint16_t qn = vfn;
2619 uint8_t ent;
2620 int n;
2622 core->mac[index] = val;
2624 /* Get assigned vector associated with queue Rx#0. */
2625 if ((val & E1000_IVAR_VALID)) {
2626 n = igb_ivar_entry_rx(qn);
2627 ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7)));
2628 core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
2631 /* Get assigned vector associated with queue Tx#0. */
2632 ent = val >> 8;
2633 if ((ent & E1000_IVAR_VALID)) {
2634 n = igb_ivar_entry_tx(qn);
2635 ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7)));
2636 core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
2640 /* Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now. */
2644 static inline void
2645 igb_autoneg_timer(void *opaque)
2647 IGBCore *core = opaque;
2648 if (!qemu_get_queue(core->owner_nic)->link_down) {
2649 e1000x_update_regs_on_autoneg_done(core->mac, core->phy);
2650 igb_start_recv(core);
2652 igb_update_flowctl_status(core);
2653 /* signal link status change to the guest */
2654 igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
2658 static inline uint16_t
2659 igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
2661 uint16_t index = (addr & 0x1ffff) >> 2;
2662 return index + (mac_reg_access[index] & 0xfffe);
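/*
 * Example of the alias decode above, using the mac_reg_access table
 * further down: the FCRTL_A entry holds offset 0x07fe, so an access at
 * the legacy byte address 0x0168 (assumed here for FCRTL_A) gives
 * index 0x168 >> 2 = 0x5a, and 0x5a + 0x7fe = 0x858, i.e. byte address
 * 0x2160 -- the real FCRTL.  The low bit masked off by 0xfffe is the
 * MAC_ACCESS_PARTIAL flag.
 */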
2665 static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = {
2666 [MII_BMCR] = PHY_RW,
2667 [MII_BMSR] = PHY_R,
2668 [MII_PHYID1] = PHY_R,
2669 [MII_PHYID2] = PHY_R,
2670 [MII_ANAR] = PHY_RW,
2671 [MII_ANLPAR] = PHY_R,
2672 [MII_ANER] = PHY_R,
2673 [MII_ANNP] = PHY_RW,
2674 [MII_ANLPRNP] = PHY_R,
2675 [MII_CTRL1000] = PHY_RW,
2676 [MII_STAT1000] = PHY_R,
2677 [MII_EXTSTAT] = PHY_R,
2679 [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW,
2680 [IGP01E1000_PHY_PORT_STATUS] = PHY_R,
2681 [IGP01E1000_PHY_PORT_CTRL] = PHY_RW,
2682 [IGP01E1000_PHY_LINK_HEALTH] = PHY_R,
2683 [IGP02E1000_PHY_POWER_MGMT] = PHY_RW,
2684 [IGP01E1000_PHY_PAGE_SELECT] = PHY_W
2687 static void
2688 igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data)
2690 assert(addr <= MAX_PHY_REG_ADDRESS);
2692 if (addr == MII_BMCR) {
2693 igb_set_phy_ctrl(core, data);
2694 } else {
2695 core->phy[addr] = data;
2699 static void
2700 igb_set_mdic(IGBCore *core, int index, uint32_t val)
2702 uint32_t data = val & E1000_MDIC_DATA_MASK;
2703 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
2705 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* PHY address must be 1 */
2706 val = core->mac[MDIC] | E1000_MDIC_ERROR;
2707 } else if (val & E1000_MDIC_OP_READ) {
2708 if (!(igb_phy_regcap[addr] & PHY_R)) {
2709 trace_igb_core_mdic_read_unhandled(addr);
2710 val |= E1000_MDIC_ERROR;
2711 } else {
2712 val = (val ^ data) | core->phy[addr];
2713 trace_igb_core_mdic_read(addr, val);
2715 } else if (val & E1000_MDIC_OP_WRITE) {
2716 if (!(igb_phy_regcap[addr] & PHY_W)) {
2717 trace_igb_core_mdic_write_unhandled(addr);
2718 val |= E1000_MDIC_ERROR;
2719 } else {
2720 trace_igb_core_mdic_write(addr, data);
2721 igb_phy_reg_write(core, addr, data);
2724 core->mac[MDIC] = val | E1000_MDIC_READY;
2726 if (val & E1000_MDIC_INT_EN) {
2727 igb_raise_interrupts(core, ICR, E1000_ICR_MDAC);
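/*
 * Sketch of a guest-driven MDIC read as handled above, assuming the
 * usual e1000 MDIC layout (data in bits 15:0, register address at
 * E1000_MDIC_REG_SHIFT, PHY address at E1000_MDIC_PHY_SHIFT):
 *
 *   guest writes:   MDIC = E1000_MDIC_OP_READ
 *                        | (MII_BMSR << E1000_MDIC_REG_SHIFT)
 *                        | (1 << E1000_MDIC_PHY_SHIFT);
 *   device replies: the data field is replaced by core->phy[MII_BMSR]
 *                   (via the val ^ data trick) and E1000_MDIC_READY is
 *                   set; a PHY address other than 1 or an inaccessible
 *                   register sets E1000_MDIC_ERROR instead.
 */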
2731 static void
2732 igb_set_rdt(IGBCore *core, int index, uint32_t val)
2734 core->mac[index] = val & 0xffff;
2735 trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val);
2736 igb_start_recv(core);
2739 static void
2740 igb_set_status(IGBCore *core, int index, uint32_t val)
2742 if ((val & E1000_STATUS_PHYRA) == 0) {
2743 core->mac[index] &= ~E1000_STATUS_PHYRA;
2747 static void
2748 igb_set_ctrlext(IGBCore *core, int index, uint32_t val)
2750 trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
2751 !!(val & E1000_CTRL_EXT_SPD_BYPS),
2752 !!(val & E1000_CTRL_EXT_PFRSTD));
2754 /* Zero self-clearing bits */
2755 val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
2756 core->mac[CTRL_EXT] = val;
2758 if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) {
2759 for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
2760 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI;
2761 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD;
2766 static void
2767 igb_set_pbaclr(IGBCore *core, int index, uint32_t val)
2769 int i;
2771 core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
2773 if (!msix_enabled(core->owner)) {
2774 return;
2777 for (i = 0; i < IGB_INTR_NUM; i++) {
2778 if (core->mac[PBACLR] & BIT(i)) {
2779 msix_clr_pending(core->owner, i);
2784 static void
2785 igb_set_fcrth(IGBCore *core, int index, uint32_t val)
2787 core->mac[FCRTH] = val & 0xFFF8;
2790 static void
2791 igb_set_fcrtl(IGBCore *core, int index, uint32_t val)
2793 core->mac[FCRTL] = val & 0x8000FFF8;
2796 #define IGB_LOW_BITS_SET_FUNC(num) \
2797 static void \
2798 igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
2800 core->mac[index] = val & (BIT(num) - 1); \
2803 IGB_LOW_BITS_SET_FUNC(4)
2804 IGB_LOW_BITS_SET_FUNC(13)
2805 IGB_LOW_BITS_SET_FUNC(16)
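/*
 * For instance, IGB_LOW_BITS_SET_FUNC(13) expands to:
 *
 *   static void
 *   igb_set_13bit(IGBCore *core, int index, uint32_t val)
 *   {
 *       core->mac[index] = val & (BIT(13) - 1);   <-- low 13 bits only
 *   }
 *
 * i.e. a masked store of val & 0x1fff for registers that only
 * implement their low bits.
 */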
2807 static void
2808 igb_set_dlen(IGBCore *core, int index, uint32_t val)
2810 core->mac[index] = val & 0xffff0;
2813 static void
2814 igb_set_dbal(IGBCore *core, int index, uint32_t val)
2816 core->mac[index] = val & E1000_XDBAL_MASK;
2819 static void
2820 igb_set_tdt(IGBCore *core, int index, uint32_t val)
2822 IGB_TxRing txr;
2823 int qn = igb_mq_queue_idx(TDT0, index);
2825 core->mac[index] = val & 0xffff;
2827 igb_tx_ring_init(core, &txr, qn);
2828 igb_start_xmit(core, &txr);
2831 static void
2832 igb_set_ics(IGBCore *core, int index, uint32_t val)
2834 trace_e1000e_irq_write_ics(val);
2835 igb_raise_interrupts(core, ICR, val);
2838 static void
2839 igb_set_imc(IGBCore *core, int index, uint32_t val)
2841 trace_e1000e_irq_ims_clear_set_imc(val);
2842 igb_lower_interrupts(core, IMS, val);
2845 static void
2846 igb_set_ims(IGBCore *core, int index, uint32_t val)
2848 igb_raise_interrupts(core, IMS, val & 0x77D4FBFD);
2851 static void igb_nsicr(IGBCore *core)
2854 /* If GPIE.NSICR = 0, then the clear of IMS will occur only if at
2855 * least one bit is set in the IMS and there is a true interrupt as
2856 * reflected in ICR.INTA. */
2858 if ((core->mac[GPIE] & E1000_GPIE_NSICR) ||
2859 (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) {
2860 igb_lower_interrupts(core, IMS, core->mac[IAM]);
2864 static void igb_set_icr(IGBCore *core, int index, uint32_t val)
2866 igb_nsicr(core);
2867 igb_lower_interrupts(core, ICR, val);
2870 static uint32_t
2871 igb_mac_readreg(IGBCore *core, int index)
2873 return core->mac[index];
2876 static uint32_t
2877 igb_mac_ics_read(IGBCore *core, int index)
2879 trace_e1000e_irq_read_ics(core->mac[ICS]);
2880 return core->mac[ICS];
2883 static uint32_t
2884 igb_mac_ims_read(IGBCore *core, int index)
2886 trace_e1000e_irq_read_ims(core->mac[IMS]);
2887 return core->mac[IMS];
2890 static uint32_t
2891 igb_mac_swsm_read(IGBCore *core, int index)
2893 uint32_t val = core->mac[SWSM];
2894 core->mac[SWSM] = val | E1000_SWSM_SMBI;
2895 return val;
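/*
 * This gives SWSM acquire-on-read semantics: the first read returns
 * SMBI clear (semaphore obtained) and leaves the bit set, so later
 * readers see it taken until software releases it by writing SWSM
 * back (a plain igb_mac_writereg in the write table below).
 */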
2898 static uint32_t
2899 igb_mac_eitr_read(IGBCore *core, int index)
2901 return core->eitr_guest_value[index - EITR0];
2904 static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index)
2906 uint32_t val = core->mac[index];
2908 core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK |
2909 E1000_V2PMAILBOX_RSTD);
2911 return val;
2914 static uint32_t
2915 igb_mac_icr_read(IGBCore *core, int index)
2917 uint32_t ret = core->mac[ICR];
2919 if (core->mac[GPIE] & E1000_GPIE_NSICR) {
2920 trace_igb_irq_icr_clear_gpie_nsicr();
2921 igb_lower_interrupts(core, ICR, 0xffffffff);
2922 } else if (core->mac[IMS] == 0) {
2923 trace_e1000e_irq_icr_clear_zero_ims();
2924 igb_lower_interrupts(core, ICR, 0xffffffff);
2925 } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) {
2926 igb_lower_interrupts(core, ICR, 0xffffffff);
2927 } else if (!msix_enabled(core->owner)) {
2928 trace_e1000e_irq_icr_clear_nonmsix_icr_read();
2929 igb_lower_interrupts(core, ICR, 0xffffffff);
2932 igb_nsicr(core);
2933 return ret;
2936 static uint32_t
2937 igb_mac_read_clr4(IGBCore *core, int index)
2939 uint32_t ret = core->mac[index];
2941 core->mac[index] = 0;
2942 return ret;
2945 static uint32_t
2946 igb_mac_read_clr8(IGBCore *core, int index)
2948 uint32_t ret = core->mac[index];
2950 core->mac[index] = 0;
2951 core->mac[index - 1] = 0;
2952 return ret;
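/*
 * igb_mac_read_clr8() backs the high half of the 64-bit statistics
 * counters laid out as adjacent low/high registers: the guest reads
 * the low half with a plain read, then this read of the high half
 * clears both words, e.g. reading TOTH zeroes TOTH and TOTL together.
 */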
2955 static uint32_t
2956 igb_get_ctrl(IGBCore *core, int index)
2958 uint32_t val = core->mac[CTRL];
2960 trace_e1000e_link_read_params(
2961 !!(val & E1000_CTRL_ASDE),
2962 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
2963 !!(val & E1000_CTRL_FRCSPD),
2964 !!(val & E1000_CTRL_FRCDPX),
2965 !!(val & E1000_CTRL_RFCE),
2966 !!(val & E1000_CTRL_TFCE));
2968 return val;
2971 static uint32_t igb_get_status(IGBCore *core, int index)
2973 uint32_t res = core->mac[STATUS];
2974 uint16_t num_vfs = pcie_sriov_num_vfs(core->owner);
2976 if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
2977 res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
2978 } else {
2979 res |= E1000_STATUS_FD;
2982 if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
2983 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
2984 switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
2985 case E1000_CTRL_SPD_10:
2986 res |= E1000_STATUS_SPEED_10;
2987 break;
2988 case E1000_CTRL_SPD_100:
2989 res |= E1000_STATUS_SPEED_100;
2990 break;
2991 case E1000_CTRL_SPD_1000:
2992 default:
2993 res |= E1000_STATUS_SPEED_1000;
2994 break;
2996 } else {
2997 res |= E1000_STATUS_SPEED_1000;
3000 if (num_vfs) {
3001 res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT;
3002 res |= E1000_STATUS_IOV_MODE;
3005 if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
3006 res |= E1000_STATUS_GIO_MASTER_ENABLE;
3009 return res;
3012 static void
3013 igb_mac_writereg(IGBCore *core, int index, uint32_t val)
3015 core->mac[index] = val;
3018 static void
3019 igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val)
3021 uint32_t macaddr[2];
3023 core->mac[index] = val;
3025 macaddr[0] = cpu_to_le32(core->mac[RA]);
3026 macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
3027 qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
3028 (uint8_t *) macaddr);
3030 trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
3033 static void
3034 igb_set_eecd(IGBCore *core, int index, uint32_t val)
3036 static const uint32_t ro_bits = E1000_EECD_PRES |
3037 E1000_EECD_AUTO_RD |
3038 E1000_EECD_SIZE_EX_MASK;
3040 core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
3043 static void
3044 igb_set_eerd(IGBCore *core, int index, uint32_t val)
3046 uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
3047 uint32_t flags = 0;
3048 uint32_t data = 0;
3050 if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) {
3051 data = core->eeprom[addr];
3052 flags = E1000_EERW_DONE;
3055 core->mac[EERD] = flags |
3056 (addr << E1000_EERW_ADDR_SHIFT) |
3057 (data << E1000_EERW_DATA_SHIFT);
3060 static void
3061 igb_set_eitr(IGBCore *core, int index, uint32_t val)
3063 uint32_t eitr_num = index - EITR0;
3065 trace_igb_irq_eitr_set(eitr_num, val);
3067 core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR;
3068 core->mac[index] = val & 0x7FFE;
3071 static void
3072 igb_update_rx_offloads(IGBCore *core)
3074 int cso_state = igb_rx_l4_cso_enabled(core);
3076 trace_e1000e_rx_set_cso(cso_state);
3078 if (core->has_vnet) {
3079 qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
3080 cso_state, 0, 0, 0, 0, 0, 0);
3084 static void
3085 igb_set_rxcsum(IGBCore *core, int index, uint32_t val)
3087 core->mac[RXCSUM] = val;
3088 igb_update_rx_offloads(core);
3091 static void
3092 igb_set_gcr(IGBCore *core, int index, uint32_t val)
3094 uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
3095 core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
3098 static uint32_t igb_get_systiml(IGBCore *core, int index)
3100 e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
3101 return core->mac[SYSTIML];
3104 static uint32_t igb_get_rxsatrh(IGBCore *core, int index)
3106 core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
3107 return core->mac[RXSATRH];
3110 static uint32_t igb_get_txstmph(IGBCore *core, int index)
3112 core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
3113 return core->mac[TXSTMPH];
3116 static void igb_set_timinca(IGBCore *core, int index, uint32_t val)
3118 e1000x_set_timinca(core->mac, &core->timadj, val);
3121 static void igb_set_timadjh(IGBCore *core, int index, uint32_t val)
3123 core->mac[TIMADJH] = val;
3124 core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
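/*
 * The adjustment commits on the TIMADJH write: software stores TIMADJL
 * first (a plain register write), then TIMADJH, and both halves fold
 * into the signed 64-bit core->timadj offset applied to SYSTIM.
 * Writing 0xffffffff to both halves, for example, steps the clock back
 * by one SYSTIM unit.
 */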
3127 #define igb_getreg(x) [x] = igb_mac_readreg
3128 typedef uint32_t (*readops)(IGBCore *, int);
3129 static const readops igb_macreg_readops[] = {
3130 igb_getreg(WUFC),
3131 igb_getreg(MANC),
3132 igb_getreg(TOTL),
3133 igb_getreg(RDT0),
3134 igb_getreg(RDT1),
3135 igb_getreg(RDT2),
3136 igb_getreg(RDT3),
3137 igb_getreg(RDT4),
3138 igb_getreg(RDT5),
3139 igb_getreg(RDT6),
3140 igb_getreg(RDT7),
3141 igb_getreg(RDT8),
3142 igb_getreg(RDT9),
3143 igb_getreg(RDT10),
3144 igb_getreg(RDT11),
3145 igb_getreg(RDT12),
3146 igb_getreg(RDT13),
3147 igb_getreg(RDT14),
3148 igb_getreg(RDT15),
3149 igb_getreg(RDBAH0),
3150 igb_getreg(RDBAH1),
3151 igb_getreg(RDBAH2),
3152 igb_getreg(RDBAH3),
3153 igb_getreg(RDBAH4),
3154 igb_getreg(RDBAH5),
3155 igb_getreg(RDBAH6),
3156 igb_getreg(RDBAH7),
3157 igb_getreg(RDBAH8),
3158 igb_getreg(RDBAH9),
3159 igb_getreg(RDBAH10),
3160 igb_getreg(RDBAH11),
3161 igb_getreg(RDBAH12),
3162 igb_getreg(RDBAH13),
3163 igb_getreg(RDBAH14),
3164 igb_getreg(RDBAH15),
3165 igb_getreg(TDBAL0),
3166 igb_getreg(TDBAL1),
3167 igb_getreg(TDBAL2),
3168 igb_getreg(TDBAL3),
3169 igb_getreg(TDBAL4),
3170 igb_getreg(TDBAL5),
3171 igb_getreg(TDBAL6),
3172 igb_getreg(TDBAL7),
3173 igb_getreg(TDBAL8),
3174 igb_getreg(TDBAL9),
3175 igb_getreg(TDBAL10),
3176 igb_getreg(TDBAL11),
3177 igb_getreg(TDBAL12),
3178 igb_getreg(TDBAL13),
3179 igb_getreg(TDBAL14),
3180 igb_getreg(TDBAL15),
3181 igb_getreg(RDLEN0),
3182 igb_getreg(RDLEN1),
3183 igb_getreg(RDLEN2),
3184 igb_getreg(RDLEN3),
3185 igb_getreg(RDLEN4),
3186 igb_getreg(RDLEN5),
3187 igb_getreg(RDLEN6),
3188 igb_getreg(RDLEN7),
3189 igb_getreg(RDLEN8),
3190 igb_getreg(RDLEN9),
3191 igb_getreg(RDLEN10),
3192 igb_getreg(RDLEN11),
3193 igb_getreg(RDLEN12),
3194 igb_getreg(RDLEN13),
3195 igb_getreg(RDLEN14),
3196 igb_getreg(RDLEN15),
3197 igb_getreg(SRRCTL0),
3198 igb_getreg(SRRCTL1),
3199 igb_getreg(SRRCTL2),
3200 igb_getreg(SRRCTL3),
3201 igb_getreg(SRRCTL4),
3202 igb_getreg(SRRCTL5),
3203 igb_getreg(SRRCTL6),
3204 igb_getreg(SRRCTL7),
3205 igb_getreg(SRRCTL8),
3206 igb_getreg(SRRCTL9),
3207 igb_getreg(SRRCTL10),
3208 igb_getreg(SRRCTL11),
3209 igb_getreg(SRRCTL12),
3210 igb_getreg(SRRCTL13),
3211 igb_getreg(SRRCTL14),
3212 igb_getreg(SRRCTL15),
3213 igb_getreg(LATECOL),
3214 igb_getreg(XONTXC),
3215 igb_getreg(TDFH),
3216 igb_getreg(TDFT),
3217 igb_getreg(TDFHS),
3218 igb_getreg(TDFTS),
3219 igb_getreg(TDFPC),
3220 igb_getreg(WUS),
3221 igb_getreg(RDFH),
3222 igb_getreg(RDFT),
3223 igb_getreg(RDFHS),
3224 igb_getreg(RDFTS),
3225 igb_getreg(RDFPC),
3226 igb_getreg(GORCL),
3227 igb_getreg(MGTPRC),
3228 igb_getreg(EERD),
3229 igb_getreg(EIAC),
3230 igb_getreg(MANC2H),
3231 igb_getreg(RXCSUM),
3232 igb_getreg(GSCL_3),
3233 igb_getreg(GSCN_2),
3234 igb_getreg(FCAH),
3235 igb_getreg(FCRTH),
3236 igb_getreg(FLOP),
3237 igb_getreg(RXSTMPH),
3238 igb_getreg(TXSTMPL),
3239 igb_getreg(TIMADJL),
3240 igb_getreg(RDH0),
3241 igb_getreg(RDH1),
3242 igb_getreg(RDH2),
3243 igb_getreg(RDH3),
3244 igb_getreg(RDH4),
3245 igb_getreg(RDH5),
3246 igb_getreg(RDH6),
3247 igb_getreg(RDH7),
3248 igb_getreg(RDH8),
3249 igb_getreg(RDH9),
3250 igb_getreg(RDH10),
3251 igb_getreg(RDH11),
3252 igb_getreg(RDH12),
3253 igb_getreg(RDH13),
3254 igb_getreg(RDH14),
3255 igb_getreg(RDH15),
3256 igb_getreg(TDT0),
3257 igb_getreg(TDT1),
3258 igb_getreg(TDT2),
3259 igb_getreg(TDT3),
3260 igb_getreg(TDT4),
3261 igb_getreg(TDT5),
3262 igb_getreg(TDT6),
3263 igb_getreg(TDT7),
3264 igb_getreg(TDT8),
3265 igb_getreg(TDT9),
3266 igb_getreg(TDT10),
3267 igb_getreg(TDT11),
3268 igb_getreg(TDT12),
3269 igb_getreg(TDT13),
3270 igb_getreg(TDT14),
3271 igb_getreg(TDT15),
3272 igb_getreg(TNCRS),
3273 igb_getreg(RJC),
3274 igb_getreg(IAM),
3275 igb_getreg(GSCL_2),
3276 igb_getreg(TIPG),
3277 igb_getreg(FLMNGCTL),
3278 igb_getreg(FLMNGCNT),
3279 igb_getreg(TSYNCTXCTL),
3280 igb_getreg(EEMNGDATA),
3281 igb_getreg(CTRL_EXT),
3282 igb_getreg(SYSTIMH),
3283 igb_getreg(EEMNGCTL),
3284 igb_getreg(FLMNGDATA),
3285 igb_getreg(TSYNCRXCTL),
3286 igb_getreg(LEDCTL),
3287 igb_getreg(TCTL),
3288 igb_getreg(TCTL_EXT),
3289 igb_getreg(DTXCTL),
3290 igb_getreg(RXPBS),
3291 igb_getreg(TDH0),
3292 igb_getreg(TDH1),
3293 igb_getreg(TDH2),
3294 igb_getreg(TDH3),
3295 igb_getreg(TDH4),
3296 igb_getreg(TDH5),
3297 igb_getreg(TDH6),
3298 igb_getreg(TDH7),
3299 igb_getreg(TDH8),
3300 igb_getreg(TDH9),
3301 igb_getreg(TDH10),
3302 igb_getreg(TDH11),
3303 igb_getreg(TDH12),
3304 igb_getreg(TDH13),
3305 igb_getreg(TDH14),
3306 igb_getreg(TDH15),
3307 igb_getreg(ECOL),
3308 igb_getreg(DC),
3309 igb_getreg(RLEC),
3310 igb_getreg(XOFFTXC),
3311 igb_getreg(RFC),
3312 igb_getreg(RNBC),
3313 igb_getreg(MGTPTC),
3314 igb_getreg(TIMINCA),
3315 igb_getreg(FACTPS),
3316 igb_getreg(GSCL_1),
3317 igb_getreg(GSCN_0),
3318 igb_getreg(PBACLR),
3319 igb_getreg(FCTTV),
3320 igb_getreg(RXSATRL),
3321 igb_getreg(TORL),
3322 igb_getreg(TDLEN0),
3323 igb_getreg(TDLEN1),
3324 igb_getreg(TDLEN2),
3325 igb_getreg(TDLEN3),
3326 igb_getreg(TDLEN4),
3327 igb_getreg(TDLEN5),
3328 igb_getreg(TDLEN6),
3329 igb_getreg(TDLEN7),
3330 igb_getreg(TDLEN8),
3331 igb_getreg(TDLEN9),
3332 igb_getreg(TDLEN10),
3333 igb_getreg(TDLEN11),
3334 igb_getreg(TDLEN12),
3335 igb_getreg(TDLEN13),
3336 igb_getreg(TDLEN14),
3337 igb_getreg(TDLEN15),
3338 igb_getreg(MCC),
3339 igb_getreg(WUC),
3340 igb_getreg(EECD),
3341 igb_getreg(FCRTV),
3342 igb_getreg(TXDCTL0),
3343 igb_getreg(TXDCTL1),
3344 igb_getreg(TXDCTL2),
3345 igb_getreg(TXDCTL3),
3346 igb_getreg(TXDCTL4),
3347 igb_getreg(TXDCTL5),
3348 igb_getreg(TXDCTL6),
3349 igb_getreg(TXDCTL7),
3350 igb_getreg(TXDCTL8),
3351 igb_getreg(TXDCTL9),
3352 igb_getreg(TXDCTL10),
3353 igb_getreg(TXDCTL11),
3354 igb_getreg(TXDCTL12),
3355 igb_getreg(TXDCTL13),
3356 igb_getreg(TXDCTL14),
3357 igb_getreg(TXDCTL15),
3358 igb_getreg(TXCTL0),
3359 igb_getreg(TXCTL1),
3360 igb_getreg(TXCTL2),
3361 igb_getreg(TXCTL3),
3362 igb_getreg(TXCTL4),
3363 igb_getreg(TXCTL5),
3364 igb_getreg(TXCTL6),
3365 igb_getreg(TXCTL7),
3366 igb_getreg(TXCTL8),
3367 igb_getreg(TXCTL9),
3368 igb_getreg(TXCTL10),
3369 igb_getreg(TXCTL11),
3370 igb_getreg(TXCTL12),
3371 igb_getreg(TXCTL13),
3372 igb_getreg(TXCTL14),
3373 igb_getreg(TXCTL15),
3374 igb_getreg(TDWBAL0),
3375 igb_getreg(TDWBAL1),
3376 igb_getreg(TDWBAL2),
3377 igb_getreg(TDWBAL3),
3378 igb_getreg(TDWBAL4),
3379 igb_getreg(TDWBAL5),
3380 igb_getreg(TDWBAL6),
3381 igb_getreg(TDWBAL7),
3382 igb_getreg(TDWBAL8),
3383 igb_getreg(TDWBAL9),
3384 igb_getreg(TDWBAL10),
3385 igb_getreg(TDWBAL11),
3386 igb_getreg(TDWBAL12),
3387 igb_getreg(TDWBAL13),
3388 igb_getreg(TDWBAL14),
3389 igb_getreg(TDWBAL15),
3390 igb_getreg(TDWBAH0),
3391 igb_getreg(TDWBAH1),
3392 igb_getreg(TDWBAH2),
3393 igb_getreg(TDWBAH3),
3394 igb_getreg(TDWBAH4),
3395 igb_getreg(TDWBAH5),
3396 igb_getreg(TDWBAH6),
3397 igb_getreg(TDWBAH7),
3398 igb_getreg(TDWBAH8),
3399 igb_getreg(TDWBAH9),
3400 igb_getreg(TDWBAH10),
3401 igb_getreg(TDWBAH11),
3402 igb_getreg(TDWBAH12),
3403 igb_getreg(TDWBAH13),
3404 igb_getreg(TDWBAH14),
3405 igb_getreg(TDWBAH15),
3406 igb_getreg(PVTCTRL0),
3407 igb_getreg(PVTCTRL1),
3408 igb_getreg(PVTCTRL2),
3409 igb_getreg(PVTCTRL3),
3410 igb_getreg(PVTCTRL4),
3411 igb_getreg(PVTCTRL5),
3412 igb_getreg(PVTCTRL6),
3413 igb_getreg(PVTCTRL7),
3414 igb_getreg(PVTEIMS0),
3415 igb_getreg(PVTEIMS1),
3416 igb_getreg(PVTEIMS2),
3417 igb_getreg(PVTEIMS3),
3418 igb_getreg(PVTEIMS4),
3419 igb_getreg(PVTEIMS5),
3420 igb_getreg(PVTEIMS6),
3421 igb_getreg(PVTEIMS7),
3422 igb_getreg(PVTEIAC0),
3423 igb_getreg(PVTEIAC1),
3424 igb_getreg(PVTEIAC2),
3425 igb_getreg(PVTEIAC3),
3426 igb_getreg(PVTEIAC4),
3427 igb_getreg(PVTEIAC5),
3428 igb_getreg(PVTEIAC6),
3429 igb_getreg(PVTEIAC7),
3430 igb_getreg(PVTEIAM0),
3431 igb_getreg(PVTEIAM1),
3432 igb_getreg(PVTEIAM2),
3433 igb_getreg(PVTEIAM3),
3434 igb_getreg(PVTEIAM4),
3435 igb_getreg(PVTEIAM5),
3436 igb_getreg(PVTEIAM6),
3437 igb_getreg(PVTEIAM7),
3438 igb_getreg(PVFGPRC0),
3439 igb_getreg(PVFGPRC1),
3440 igb_getreg(PVFGPRC2),
3441 igb_getreg(PVFGPRC3),
3442 igb_getreg(PVFGPRC4),
3443 igb_getreg(PVFGPRC5),
3444 igb_getreg(PVFGPRC6),
3445 igb_getreg(PVFGPRC7),
3446 igb_getreg(PVFGPTC0),
3447 igb_getreg(PVFGPTC1),
3448 igb_getreg(PVFGPTC2),
3449 igb_getreg(PVFGPTC3),
3450 igb_getreg(PVFGPTC4),
3451 igb_getreg(PVFGPTC5),
3452 igb_getreg(PVFGPTC6),
3453 igb_getreg(PVFGPTC7),
3454 igb_getreg(PVFGORC0),
3455 igb_getreg(PVFGORC1),
3456 igb_getreg(PVFGORC2),
3457 igb_getreg(PVFGORC3),
3458 igb_getreg(PVFGORC4),
3459 igb_getreg(PVFGORC5),
3460 igb_getreg(PVFGORC6),
3461 igb_getreg(PVFGORC7),
3462 igb_getreg(PVFGOTC0),
3463 igb_getreg(PVFGOTC1),
3464 igb_getreg(PVFGOTC2),
3465 igb_getreg(PVFGOTC3),
3466 igb_getreg(PVFGOTC4),
3467 igb_getreg(PVFGOTC5),
3468 igb_getreg(PVFGOTC6),
3469 igb_getreg(PVFGOTC7),
3470 igb_getreg(PVFMPRC0),
3471 igb_getreg(PVFMPRC1),
3472 igb_getreg(PVFMPRC2),
3473 igb_getreg(PVFMPRC3),
3474 igb_getreg(PVFMPRC4),
3475 igb_getreg(PVFMPRC5),
3476 igb_getreg(PVFMPRC6),
3477 igb_getreg(PVFMPRC7),
3478 igb_getreg(PVFGPRLBC0),
3479 igb_getreg(PVFGPRLBC1),
3480 igb_getreg(PVFGPRLBC2),
3481 igb_getreg(PVFGPRLBC3),
3482 igb_getreg(PVFGPRLBC4),
3483 igb_getreg(PVFGPRLBC5),
3484 igb_getreg(PVFGPRLBC6),
3485 igb_getreg(PVFGPRLBC7),
3486 igb_getreg(PVFGPTLBC0),
3487 igb_getreg(PVFGPTLBC1),
3488 igb_getreg(PVFGPTLBC2),
3489 igb_getreg(PVFGPTLBC3),
3490 igb_getreg(PVFGPTLBC4),
3491 igb_getreg(PVFGPTLBC5),
3492 igb_getreg(PVFGPTLBC6),
3493 igb_getreg(PVFGPTLBC7),
3494 igb_getreg(PVFGORLBC0),
3495 igb_getreg(PVFGORLBC1),
3496 igb_getreg(PVFGORLBC2),
3497 igb_getreg(PVFGORLBC3),
3498 igb_getreg(PVFGORLBC4),
3499 igb_getreg(PVFGORLBC5),
3500 igb_getreg(PVFGORLBC6),
3501 igb_getreg(PVFGORLBC7),
3502 igb_getreg(PVFGOTLBC0),
3503 igb_getreg(PVFGOTLBC1),
3504 igb_getreg(PVFGOTLBC2),
3505 igb_getreg(PVFGOTLBC3),
3506 igb_getreg(PVFGOTLBC4),
3507 igb_getreg(PVFGOTLBC5),
3508 igb_getreg(PVFGOTLBC6),
3509 igb_getreg(PVFGOTLBC7),
3510 igb_getreg(RCTL),
3511 igb_getreg(MDIC),
3512 igb_getreg(FCRUC),
3513 igb_getreg(VET),
3514 igb_getreg(RDBAL0),
3515 igb_getreg(RDBAL1),
3516 igb_getreg(RDBAL2),
3517 igb_getreg(RDBAL3),
3518 igb_getreg(RDBAL4),
3519 igb_getreg(RDBAL5),
3520 igb_getreg(RDBAL6),
3521 igb_getreg(RDBAL7),
3522 igb_getreg(RDBAL8),
3523 igb_getreg(RDBAL9),
3524 igb_getreg(RDBAL10),
3525 igb_getreg(RDBAL11),
3526 igb_getreg(RDBAL12),
3527 igb_getreg(RDBAL13),
3528 igb_getreg(RDBAL14),
3529 igb_getreg(RDBAL15),
3530 igb_getreg(TDBAH0),
3531 igb_getreg(TDBAH1),
3532 igb_getreg(TDBAH2),
3533 igb_getreg(TDBAH3),
3534 igb_getreg(TDBAH4),
3535 igb_getreg(TDBAH5),
3536 igb_getreg(TDBAH6),
3537 igb_getreg(TDBAH7),
3538 igb_getreg(TDBAH8),
3539 igb_getreg(TDBAH9),
3540 igb_getreg(TDBAH10),
3541 igb_getreg(TDBAH11),
3542 igb_getreg(TDBAH12),
3543 igb_getreg(TDBAH13),
3544 igb_getreg(TDBAH14),
3545 igb_getreg(TDBAH15),
3546 igb_getreg(SCC),
3547 igb_getreg(COLC),
3548 igb_getreg(XOFFRXC),
3549 igb_getreg(IPAV),
3550 igb_getreg(GOTCL),
3551 igb_getreg(MGTPDC),
3552 igb_getreg(GCR),
3553 igb_getreg(MFVAL),
3554 igb_getreg(FUNCTAG),
3555 igb_getreg(GSCL_4),
3556 igb_getreg(GSCN_3),
3557 igb_getreg(MRQC),
3558 igb_getreg(FCT),
3559 igb_getreg(FLA),
3560 igb_getreg(RXDCTL0),
3561 igb_getreg(RXDCTL1),
3562 igb_getreg(RXDCTL2),
3563 igb_getreg(RXDCTL3),
3564 igb_getreg(RXDCTL4),
3565 igb_getreg(RXDCTL5),
3566 igb_getreg(RXDCTL6),
3567 igb_getreg(RXDCTL7),
3568 igb_getreg(RXDCTL8),
3569 igb_getreg(RXDCTL9),
3570 igb_getreg(RXDCTL10),
3571 igb_getreg(RXDCTL11),
3572 igb_getreg(RXDCTL12),
3573 igb_getreg(RXDCTL13),
3574 igb_getreg(RXDCTL14),
3575 igb_getreg(RXDCTL15),
3576 igb_getreg(RXSTMPL),
3577 igb_getreg(TIMADJH),
3578 igb_getreg(FCRTL),
3579 igb_getreg(XONRXC),
3580 igb_getreg(RFCTL),
3581 igb_getreg(GSCN_1),
3582 igb_getreg(FCAL),
3583 igb_getreg(GPIE),
3584 igb_getreg(TXPBS),
3585 igb_getreg(RLPML),
3587 [TOTH] = igb_mac_read_clr8,
3588 [GOTCH] = igb_mac_read_clr8,
3589 [PRC64] = igb_mac_read_clr4,
3590 [PRC255] = igb_mac_read_clr4,
3591 [PRC1023] = igb_mac_read_clr4,
3592 [PTC64] = igb_mac_read_clr4,
3593 [PTC255] = igb_mac_read_clr4,
3594 [PTC1023] = igb_mac_read_clr4,
3595 [GPRC] = igb_mac_read_clr4,
3596 [TPT] = igb_mac_read_clr4,
3597 [RUC] = igb_mac_read_clr4,
3598 [BPRC] = igb_mac_read_clr4,
3599 [MPTC] = igb_mac_read_clr4,
3600 [IAC] = igb_mac_read_clr4,
3601 [ICR] = igb_mac_icr_read,
3602 [STATUS] = igb_get_status,
3603 [ICS] = igb_mac_ics_read,
3605 /* 8.8.10: Reading the IMC register returns the value of the IMS register. */
3607 [IMC] = igb_mac_ims_read,
3608 [TORH] = igb_mac_read_clr8,
3609 [GORCH] = igb_mac_read_clr8,
3610 [PRC127] = igb_mac_read_clr4,
3611 [PRC511] = igb_mac_read_clr4,
3612 [PRC1522] = igb_mac_read_clr4,
3613 [PTC127] = igb_mac_read_clr4,
3614 [PTC511] = igb_mac_read_clr4,
3615 [PTC1522] = igb_mac_read_clr4,
3616 [GPTC] = igb_mac_read_clr4,
3617 [TPR] = igb_mac_read_clr4,
3618 [ROC] = igb_mac_read_clr4,
3619 [MPRC] = igb_mac_read_clr4,
3620 [BPTC] = igb_mac_read_clr4,
3621 [TSCTC] = igb_mac_read_clr4,
3622 [CTRL] = igb_get_ctrl,
3623 [SWSM] = igb_mac_swsm_read,
3624 [IMS] = igb_mac_ims_read,
3625 [SYSTIML] = igb_get_systiml,
3626 [RXSATRH] = igb_get_rxsatrh,
3627 [TXSTMPH] = igb_get_txstmph,
3629 [CRCERRS ... MPC] = igb_mac_readreg,
3630 [IP6AT ... IP6AT + 3] = igb_mac_readreg,
3631 [IP4AT ... IP4AT + 6] = igb_mac_readreg,
3632 [RA ... RA + 31] = igb_mac_readreg,
3633 [RA2 ... RA2 + 31] = igb_mac_readreg,
3634 [WUPM ... WUPM + 31] = igb_mac_readreg,
3635 [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_readreg,
3636 [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_readreg,
3637 [FFMT ... FFMT + 254] = igb_mac_readreg,
3638 [MDEF ... MDEF + 7] = igb_mac_readreg,
3639 [FTFT ... FTFT + 254] = igb_mac_readreg,
3640 [RETA ... RETA + 31] = igb_mac_readreg,
3641 [RSSRK ... RSSRK + 9] = igb_mac_readreg,
3642 [MAVTV0 ... MAVTV3] = igb_mac_readreg,
3643 [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read,
3644 [PVTEICR0] = igb_mac_read_clr4,
3645 [PVTEICR1] = igb_mac_read_clr4,
3646 [PVTEICR2] = igb_mac_read_clr4,
3647 [PVTEICR3] = igb_mac_read_clr4,
3648 [PVTEICR4] = igb_mac_read_clr4,
3649 [PVTEICR5] = igb_mac_read_clr4,
3650 [PVTEICR6] = igb_mac_read_clr4,
3651 [PVTEICR7] = igb_mac_read_clr4,
3653 /* IGB specific: */
3654 [FWSM] = igb_mac_readreg,
3655 [SW_FW_SYNC] = igb_mac_readreg,
3656 [HTCBDPC] = igb_mac_read_clr4,
3657 [EICR] = igb_mac_read_clr4,
3658 [EIMS] = igb_mac_readreg,
3659 [EIAM] = igb_mac_readreg,
3660 [IVAR0 ... IVAR0 + 7] = igb_mac_readreg,
3661 igb_getreg(IVAR_MISC),
3662 igb_getreg(TSYNCRXCFG),
3663 [ETQF0 ... ETQF0 + 7] = igb_mac_readreg,
3664 igb_getreg(VT_CTL),
3665 [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg,
3666 [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read,
3667 igb_getreg(MBVFICR),
3668 [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_readreg,
3669 igb_getreg(MBVFIMR),
3670 igb_getreg(VFLRE),
3671 igb_getreg(VFRE),
3672 igb_getreg(VFTE),
3673 igb_getreg(QDE),
3674 igb_getreg(DTXSWC),
3675 igb_getreg(RPLOLR),
3676 [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg,
3677 [VMVIR0 ... VMVIR7] = igb_mac_readreg,
3678 [VMOLR0 ... VMOLR7] = igb_mac_readreg,
3679 [WVBR] = igb_mac_read_clr4,
3680 [RQDPC0] = igb_mac_read_clr4,
3681 [RQDPC1] = igb_mac_read_clr4,
3682 [RQDPC2] = igb_mac_read_clr4,
3683 [RQDPC3] = igb_mac_read_clr4,
3684 [RQDPC4] = igb_mac_read_clr4,
3685 [RQDPC5] = igb_mac_read_clr4,
3686 [RQDPC6] = igb_mac_read_clr4,
3687 [RQDPC7] = igb_mac_read_clr4,
3688 [RQDPC8] = igb_mac_read_clr4,
3689 [RQDPC9] = igb_mac_read_clr4,
3690 [RQDPC10] = igb_mac_read_clr4,
3691 [RQDPC11] = igb_mac_read_clr4,
3692 [RQDPC12] = igb_mac_read_clr4,
3693 [RQDPC13] = igb_mac_read_clr4,
3694 [RQDPC14] = igb_mac_read_clr4,
3695 [RQDPC15] = igb_mac_read_clr4,
3696 [VTIVAR ... VTIVAR + 7] = igb_mac_readreg,
3697 [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg,
3699 enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) };
3701 #define igb_putreg(x) [x] = igb_mac_writereg
3702 typedef void (*writeops)(IGBCore *, int, uint32_t);
3703 static const writeops igb_macreg_writeops[] = {
3704 igb_putreg(SWSM),
3705 igb_putreg(WUFC),
3706 igb_putreg(RDBAH0),
3707 igb_putreg(RDBAH1),
3708 igb_putreg(RDBAH2),
3709 igb_putreg(RDBAH3),
3710 igb_putreg(RDBAH4),
3711 igb_putreg(RDBAH5),
3712 igb_putreg(RDBAH6),
3713 igb_putreg(RDBAH7),
3714 igb_putreg(RDBAH8),
3715 igb_putreg(RDBAH9),
3716 igb_putreg(RDBAH10),
3717 igb_putreg(RDBAH11),
3718 igb_putreg(RDBAH12),
3719 igb_putreg(RDBAH13),
3720 igb_putreg(RDBAH14),
3721 igb_putreg(RDBAH15),
3722 igb_putreg(SRRCTL0),
3723 igb_putreg(SRRCTL1),
3724 igb_putreg(SRRCTL2),
3725 igb_putreg(SRRCTL3),
3726 igb_putreg(SRRCTL4),
3727 igb_putreg(SRRCTL5),
3728 igb_putreg(SRRCTL6),
3729 igb_putreg(SRRCTL7),
3730 igb_putreg(SRRCTL8),
3731 igb_putreg(SRRCTL9),
3732 igb_putreg(SRRCTL10),
3733 igb_putreg(SRRCTL11),
3734 igb_putreg(SRRCTL12),
3735 igb_putreg(SRRCTL13),
3736 igb_putreg(SRRCTL14),
3737 igb_putreg(SRRCTL15),
3738 igb_putreg(RXDCTL0),
3739 igb_putreg(RXDCTL1),
3740 igb_putreg(RXDCTL2),
3741 igb_putreg(RXDCTL3),
3742 igb_putreg(RXDCTL4),
3743 igb_putreg(RXDCTL5),
3744 igb_putreg(RXDCTL6),
3745 igb_putreg(RXDCTL7),
3746 igb_putreg(RXDCTL8),
3747 igb_putreg(RXDCTL9),
3748 igb_putreg(RXDCTL10),
3749 igb_putreg(RXDCTL11),
3750 igb_putreg(RXDCTL12),
3751 igb_putreg(RXDCTL13),
3752 igb_putreg(RXDCTL14),
3753 igb_putreg(RXDCTL15),
3754 igb_putreg(LEDCTL),
3755 igb_putreg(TCTL),
3756 igb_putreg(TCTL_EXT),
3757 igb_putreg(DTXCTL),
3758 igb_putreg(RXPBS),
3759 igb_putreg(RQDPC0),
3760 igb_putreg(FCAL),
3761 igb_putreg(FCRUC),
3762 igb_putreg(WUC),
3763 igb_putreg(WUS),
3764 igb_putreg(IPAV),
3765 igb_putreg(TDBAH0),
3766 igb_putreg(TDBAH1),
3767 igb_putreg(TDBAH2),
3768 igb_putreg(TDBAH3),
3769 igb_putreg(TDBAH4),
3770 igb_putreg(TDBAH5),
3771 igb_putreg(TDBAH6),
3772 igb_putreg(TDBAH7),
3773 igb_putreg(TDBAH8),
3774 igb_putreg(TDBAH9),
3775 igb_putreg(TDBAH10),
3776 igb_putreg(TDBAH11),
3777 igb_putreg(TDBAH12),
3778 igb_putreg(TDBAH13),
3779 igb_putreg(TDBAH14),
3780 igb_putreg(TDBAH15),
3781 igb_putreg(IAM),
3782 igb_putreg(MANC),
3783 igb_putreg(MANC2H),
3784 igb_putreg(MFVAL),
3785 igb_putreg(FACTPS),
3786 igb_putreg(FUNCTAG),
3787 igb_putreg(GSCL_1),
3788 igb_putreg(GSCL_2),
3789 igb_putreg(GSCL_3),
3790 igb_putreg(GSCL_4),
3791 igb_putreg(GSCN_0),
3792 igb_putreg(GSCN_1),
3793 igb_putreg(GSCN_2),
3794 igb_putreg(GSCN_3),
3795 igb_putreg(MRQC),
3796 igb_putreg(FLOP),
3797 igb_putreg(FLA),
3798 igb_putreg(TXDCTL0),
3799 igb_putreg(TXDCTL1),
3800 igb_putreg(TXDCTL2),
3801 igb_putreg(TXDCTL3),
3802 igb_putreg(TXDCTL4),
3803 igb_putreg(TXDCTL5),
3804 igb_putreg(TXDCTL6),
3805 igb_putreg(TXDCTL7),
3806 igb_putreg(TXDCTL8),
3807 igb_putreg(TXDCTL9),
3808 igb_putreg(TXDCTL10),
3809 igb_putreg(TXDCTL11),
3810 igb_putreg(TXDCTL12),
3811 igb_putreg(TXDCTL13),
3812 igb_putreg(TXDCTL14),
3813 igb_putreg(TXDCTL15),
3814 igb_putreg(TXCTL0),
3815 igb_putreg(TXCTL1),
3816 igb_putreg(TXCTL2),
3817 igb_putreg(TXCTL3),
3818 igb_putreg(TXCTL4),
3819 igb_putreg(TXCTL5),
3820 igb_putreg(TXCTL6),
3821 igb_putreg(TXCTL7),
3822 igb_putreg(TXCTL8),
3823 igb_putreg(TXCTL9),
3824 igb_putreg(TXCTL10),
3825 igb_putreg(TXCTL11),
3826 igb_putreg(TXCTL12),
3827 igb_putreg(TXCTL13),
3828 igb_putreg(TXCTL14),
3829 igb_putreg(TXCTL15),
3830 igb_putreg(TDWBAL0),
3831 igb_putreg(TDWBAL1),
3832 igb_putreg(TDWBAL2),
3833 igb_putreg(TDWBAL3),
3834 igb_putreg(TDWBAL4),
3835 igb_putreg(TDWBAL5),
3836 igb_putreg(TDWBAL6),
3837 igb_putreg(TDWBAL7),
3838 igb_putreg(TDWBAL8),
3839 igb_putreg(TDWBAL9),
3840 igb_putreg(TDWBAL10),
3841 igb_putreg(TDWBAL11),
3842 igb_putreg(TDWBAL12),
3843 igb_putreg(TDWBAL13),
3844 igb_putreg(TDWBAL14),
3845 igb_putreg(TDWBAL15),
3846 igb_putreg(TDWBAH0),
3847 igb_putreg(TDWBAH1),
3848 igb_putreg(TDWBAH2),
3849 igb_putreg(TDWBAH3),
3850 igb_putreg(TDWBAH4),
3851 igb_putreg(TDWBAH5),
3852 igb_putreg(TDWBAH6),
3853 igb_putreg(TDWBAH7),
3854 igb_putreg(TDWBAH8),
3855 igb_putreg(TDWBAH9),
3856 igb_putreg(TDWBAH10),
3857 igb_putreg(TDWBAH11),
3858 igb_putreg(TDWBAH12),
3859 igb_putreg(TDWBAH13),
3860 igb_putreg(TDWBAH14),
3861 igb_putreg(TDWBAH15),
3862 igb_putreg(TIPG),
3863 igb_putreg(RXSTMPH),
3864 igb_putreg(RXSTMPL),
3865 igb_putreg(RXSATRL),
3866 igb_putreg(RXSATRH),
3867 igb_putreg(TXSTMPL),
3868 igb_putreg(TXSTMPH),
3869 igb_putreg(SYSTIML),
3870 igb_putreg(SYSTIMH),
3871 igb_putreg(TIMADJL),
3872 igb_putreg(TSYNCRXCTL),
3873 igb_putreg(TSYNCTXCTL),
3874 igb_putreg(EEMNGCTL),
3875 igb_putreg(GPIE),
3876 igb_putreg(TXPBS),
3877 igb_putreg(RLPML),
3878 igb_putreg(VET),
3880 [TDH0] = igb_set_16bit,
3881 [TDH1] = igb_set_16bit,
3882 [TDH2] = igb_set_16bit,
3883 [TDH3] = igb_set_16bit,
3884 [TDH4] = igb_set_16bit,
3885 [TDH5] = igb_set_16bit,
3886 [TDH6] = igb_set_16bit,
3887 [TDH7] = igb_set_16bit,
3888 [TDH8] = igb_set_16bit,
3889 [TDH9] = igb_set_16bit,
3890 [TDH10] = igb_set_16bit,
3891 [TDH11] = igb_set_16bit,
3892 [TDH12] = igb_set_16bit,
3893 [TDH13] = igb_set_16bit,
3894 [TDH14] = igb_set_16bit,
3895 [TDH15] = igb_set_16bit,
3896 [TDT0] = igb_set_tdt,
3897 [TDT1] = igb_set_tdt,
3898 [TDT2] = igb_set_tdt,
3899 [TDT3] = igb_set_tdt,
3900 [TDT4] = igb_set_tdt,
3901 [TDT5] = igb_set_tdt,
3902 [TDT6] = igb_set_tdt,
3903 [TDT7] = igb_set_tdt,
3904 [TDT8] = igb_set_tdt,
3905 [TDT9] = igb_set_tdt,
3906 [TDT10] = igb_set_tdt,
3907 [TDT11] = igb_set_tdt,
3908 [TDT12] = igb_set_tdt,
3909 [TDT13] = igb_set_tdt,
3910 [TDT14] = igb_set_tdt,
3911 [TDT15] = igb_set_tdt,
3912 [MDIC] = igb_set_mdic,
3913 [ICS] = igb_set_ics,
3914 [RDH0] = igb_set_16bit,
3915 [RDH1] = igb_set_16bit,
3916 [RDH2] = igb_set_16bit,
3917 [RDH3] = igb_set_16bit,
3918 [RDH4] = igb_set_16bit,
3919 [RDH5] = igb_set_16bit,
3920 [RDH6] = igb_set_16bit,
3921 [RDH7] = igb_set_16bit,
3922 [RDH8] = igb_set_16bit,
3923 [RDH9] = igb_set_16bit,
3924 [RDH10] = igb_set_16bit,
3925 [RDH11] = igb_set_16bit,
3926 [RDH12] = igb_set_16bit,
3927 [RDH13] = igb_set_16bit,
3928 [RDH14] = igb_set_16bit,
3929 [RDH15] = igb_set_16bit,
3930 [RDT0] = igb_set_rdt,
3931 [RDT1] = igb_set_rdt,
3932 [RDT2] = igb_set_rdt,
3933 [RDT3] = igb_set_rdt,
3934 [RDT4] = igb_set_rdt,
3935 [RDT5] = igb_set_rdt,
3936 [RDT6] = igb_set_rdt,
3937 [RDT7] = igb_set_rdt,
3938 [RDT8] = igb_set_rdt,
3939 [RDT9] = igb_set_rdt,
3940 [RDT10] = igb_set_rdt,
3941 [RDT11] = igb_set_rdt,
3942 [RDT12] = igb_set_rdt,
3943 [RDT13] = igb_set_rdt,
3944 [RDT14] = igb_set_rdt,
3945 [RDT15] = igb_set_rdt,
3946 [IMC] = igb_set_imc,
3947 [IMS] = igb_set_ims,
3948 [ICR] = igb_set_icr,
3949 [EECD] = igb_set_eecd,
3950 [RCTL] = igb_set_rx_control,
3951 [CTRL] = igb_set_ctrl,
3952 [EERD] = igb_set_eerd,
3953 [TDFH] = igb_set_13bit,
3954 [TDFT] = igb_set_13bit,
3955 [TDFHS] = igb_set_13bit,
3956 [TDFTS] = igb_set_13bit,
3957 [TDFPC] = igb_set_13bit,
3958 [RDFH] = igb_set_13bit,
3959 [RDFT] = igb_set_13bit,
3960 [RDFHS] = igb_set_13bit,
3961 [RDFTS] = igb_set_13bit,
3962 [RDFPC] = igb_set_13bit,
3963 [GCR] = igb_set_gcr,
3964 [RXCSUM] = igb_set_rxcsum,
3965 [TDLEN0] = igb_set_dlen,
3966 [TDLEN1] = igb_set_dlen,
3967 [TDLEN2] = igb_set_dlen,
3968 [TDLEN3] = igb_set_dlen,
3969 [TDLEN4] = igb_set_dlen,
3970 [TDLEN5] = igb_set_dlen,
3971 [TDLEN6] = igb_set_dlen,
3972 [TDLEN7] = igb_set_dlen,
3973 [TDLEN8] = igb_set_dlen,
3974 [TDLEN9] = igb_set_dlen,
3975 [TDLEN10] = igb_set_dlen,
3976 [TDLEN11] = igb_set_dlen,
3977 [TDLEN12] = igb_set_dlen,
3978 [TDLEN13] = igb_set_dlen,
3979 [TDLEN14] = igb_set_dlen,
3980 [TDLEN15] = igb_set_dlen,
3981 [RDLEN0] = igb_set_dlen,
3982 [RDLEN1] = igb_set_dlen,
3983 [RDLEN2] = igb_set_dlen,
3984 [RDLEN3] = igb_set_dlen,
3985 [RDLEN4] = igb_set_dlen,
3986 [RDLEN5] = igb_set_dlen,
3987 [RDLEN6] = igb_set_dlen,
3988 [RDLEN7] = igb_set_dlen,
3989 [RDLEN8] = igb_set_dlen,
3990 [RDLEN9] = igb_set_dlen,
3991 [RDLEN10] = igb_set_dlen,
3992 [RDLEN11] = igb_set_dlen,
3993 [RDLEN12] = igb_set_dlen,
3994 [RDLEN13] = igb_set_dlen,
3995 [RDLEN14] = igb_set_dlen,
3996 [RDLEN15] = igb_set_dlen,
3997 [TDBAL0] = igb_set_dbal,
3998 [TDBAL1] = igb_set_dbal,
3999 [TDBAL2] = igb_set_dbal,
4000 [TDBAL3] = igb_set_dbal,
4001 [TDBAL4] = igb_set_dbal,
4002 [TDBAL5] = igb_set_dbal,
4003 [TDBAL6] = igb_set_dbal,
4004 [TDBAL7] = igb_set_dbal,
4005 [TDBAL8] = igb_set_dbal,
4006 [TDBAL9] = igb_set_dbal,
4007 [TDBAL10] = igb_set_dbal,
4008 [TDBAL11] = igb_set_dbal,
4009 [TDBAL12] = igb_set_dbal,
4010 [TDBAL13] = igb_set_dbal,
4011 [TDBAL14] = igb_set_dbal,
4012 [TDBAL15] = igb_set_dbal,
4013 [RDBAL0] = igb_set_dbal,
4014 [RDBAL1] = igb_set_dbal,
4015 [RDBAL2] = igb_set_dbal,
4016 [RDBAL3] = igb_set_dbal,
4017 [RDBAL4] = igb_set_dbal,
4018 [RDBAL5] = igb_set_dbal,
4019 [RDBAL6] = igb_set_dbal,
4020 [RDBAL7] = igb_set_dbal,
4021 [RDBAL8] = igb_set_dbal,
4022 [RDBAL9] = igb_set_dbal,
4023 [RDBAL10] = igb_set_dbal,
4024 [RDBAL11] = igb_set_dbal,
4025 [RDBAL12] = igb_set_dbal,
4026 [RDBAL13] = igb_set_dbal,
4027 [RDBAL14] = igb_set_dbal,
4028 [RDBAL15] = igb_set_dbal,
4029 [STATUS] = igb_set_status,
4030 [PBACLR] = igb_set_pbaclr,
4031 [CTRL_EXT] = igb_set_ctrlext,
4032 [FCAH] = igb_set_16bit,
4033 [FCT] = igb_set_16bit,
4034 [FCTTV] = igb_set_16bit,
4035 [FCRTV] = igb_set_16bit,
4036 [FCRTH] = igb_set_fcrth,
4037 [FCRTL] = igb_set_fcrtl,
4038 [CTRL_DUP] = igb_set_ctrl,
4039 [RFCTL] = igb_set_rfctl,
4040 [TIMINCA] = igb_set_timinca,
4041 [TIMADJH] = igb_set_timadjh,
4043 [IP6AT ... IP6AT + 3] = igb_mac_writereg,
4044 [IP4AT ... IP4AT + 6] = igb_mac_writereg,
4045 [RA] = igb_mac_writereg,
4046 [RA + 1] = igb_mac_setmacaddr,
4047 [RA + 2 ... RA + 31] = igb_mac_writereg,
4048 [RA2 ... RA2 + 31] = igb_mac_writereg,
4049 [WUPM ... WUPM + 31] = igb_mac_writereg,
4050 [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
4051 [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg,
4052 [FFMT ... FFMT + 254] = igb_set_4bit,
4053 [MDEF ... MDEF + 7] = igb_mac_writereg,
4054 [FTFT ... FTFT + 254] = igb_mac_writereg,
4055 [RETA ... RETA + 31] = igb_mac_writereg,
4056 [RSSRK ... RSSRK + 9] = igb_mac_writereg,
4057 [MAVTV0 ... MAVTV3] = igb_mac_writereg,
4058 [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr,
4060 /* IGB specific: */
4061 [FWSM] = igb_mac_writereg,
4062 [SW_FW_SYNC] = igb_mac_writereg,
4063 [EICR] = igb_set_eicr,
4064 [EICS] = igb_set_eics,
4065 [EIAC] = igb_set_eiac,
4066 [EIAM] = igb_set_eiam,
4067 [EIMC] = igb_set_eimc,
4068 [EIMS] = igb_set_eims,
4069 [IVAR0 ... IVAR0 + 7] = igb_mac_writereg,
4070 igb_putreg(IVAR_MISC),
4071 igb_putreg(TSYNCRXCFG),
4072 [ETQF0 ... ETQF0 + 7] = igb_mac_writereg,
4073 igb_putreg(VT_CTL),
4074 [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox,
4075 [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox,
4076 [MBVFICR] = igb_w1c,
4077 [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg,
4078 igb_putreg(MBVFIMR),
4079 [VFLRE] = igb_w1c,
4080 igb_putreg(VFRE),
4081 igb_putreg(VFTE),
4082 igb_putreg(QDE),
4083 igb_putreg(DTXSWC),
4084 igb_putreg(RPLOLR),
4085 [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg,
4086 [VMVIR0 ... VMVIR7] = igb_mac_writereg,
4087 [VMOLR0 ... VMOLR7] = igb_mac_writereg,
4088 [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
4089 [PVTCTRL0] = igb_set_vtctrl,
4090 [PVTCTRL1] = igb_set_vtctrl,
4091 [PVTCTRL2] = igb_set_vtctrl,
4092 [PVTCTRL3] = igb_set_vtctrl,
4093 [PVTCTRL4] = igb_set_vtctrl,
4094 [PVTCTRL5] = igb_set_vtctrl,
4095 [PVTCTRL6] = igb_set_vtctrl,
4096 [PVTCTRL7] = igb_set_vtctrl,
4097 [PVTEICS0] = igb_set_vteics,
4098 [PVTEICS1] = igb_set_vteics,
4099 [PVTEICS2] = igb_set_vteics,
4100 [PVTEICS3] = igb_set_vteics,
4101 [PVTEICS4] = igb_set_vteics,
4102 [PVTEICS5] = igb_set_vteics,
4103 [PVTEICS6] = igb_set_vteics,
4104 [PVTEICS7] = igb_set_vteics,
4105 [PVTEIMS0] = igb_set_vteims,
4106 [PVTEIMS1] = igb_set_vteims,
4107 [PVTEIMS2] = igb_set_vteims,
4108 [PVTEIMS3] = igb_set_vteims,
4109 [PVTEIMS4] = igb_set_vteims,
4110 [PVTEIMS5] = igb_set_vteims,
4111 [PVTEIMS6] = igb_set_vteims,
4112 [PVTEIMS7] = igb_set_vteims,
4113 [PVTEIMC0] = igb_set_vteimc,
4114 [PVTEIMC1] = igb_set_vteimc,
4115 [PVTEIMC2] = igb_set_vteimc,
4116 [PVTEIMC3] = igb_set_vteimc,
4117 [PVTEIMC4] = igb_set_vteimc,
4118 [PVTEIMC5] = igb_set_vteimc,
4119 [PVTEIMC6] = igb_set_vteimc,
4120 [PVTEIMC7] = igb_set_vteimc,
4121 [PVTEIAC0] = igb_set_vteiac,
4122 [PVTEIAC1] = igb_set_vteiac,
4123 [PVTEIAC2] = igb_set_vteiac,
4124 [PVTEIAC3] = igb_set_vteiac,
4125 [PVTEIAC4] = igb_set_vteiac,
4126 [PVTEIAC5] = igb_set_vteiac,
4127 [PVTEIAC6] = igb_set_vteiac,
4128 [PVTEIAC7] = igb_set_vteiac,
4129 [PVTEIAM0] = igb_set_vteiam,
4130 [PVTEIAM1] = igb_set_vteiam,
4131 [PVTEIAM2] = igb_set_vteiam,
4132 [PVTEIAM3] = igb_set_vteiam,
4133 [PVTEIAM4] = igb_set_vteiam,
4134 [PVTEIAM5] = igb_set_vteiam,
4135 [PVTEIAM6] = igb_set_vteiam,
4136 [PVTEIAM7] = igb_set_vteiam,
4137 [PVTEICR0] = igb_set_vteicr,
4138 [PVTEICR1] = igb_set_vteicr,
4139 [PVTEICR2] = igb_set_vteicr,
4140 [PVTEICR3] = igb_set_vteicr,
4141 [PVTEICR4] = igb_set_vteicr,
4142 [PVTEICR5] = igb_set_vteicr,
4143 [PVTEICR6] = igb_set_vteicr,
4144 [PVTEICR7] = igb_set_vteicr,
4145 [VTIVAR ... VTIVAR + 7] = igb_set_vtivar,
4146 [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg
4148 enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) };
4150 enum { MAC_ACCESS_PARTIAL = 1 };
4153 /* The array below combines alias offsets of the index values for the
4154 * MAC registers that have aliases with a flag (the lowest bit) that
4155 * marks registers that are not fully implemented. The two can share
4156 * one entry because all of the alias offsets are even. */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [FCRTL_A] = 0x07fe,
    [RDFH_A] = 0xe904, [RDFT_A] = 0xe904,
    [TDFH_A] = 0xed00, [TDFT_A] = 0xed00,
    [RA_A ... RA_A + 31] = 0x14f0,
    [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,

    [RDBAL0_A] = 0x2600,
    [RDBAH0_A] = 0x2600,
    [RDLEN0_A] = 0x2600,
    [SRRCTL0_A] = 0x2600,
    [RDH0_A] = 0x2600,
    [RDT0_A] = 0x2600,
    [RXDCTL0_A] = 0x2600,
    [RXCTL0_A] = 0x2600,
    [RQDPC0_A] = 0x2600,
    [RDBAL1_A] = 0x25D0,
    [RDBAL2_A] = 0x25A0,
    [RDBAL3_A] = 0x2570,
    [RDBAH1_A] = 0x25D0,
    [RDBAH2_A] = 0x25A0,
    [RDBAH3_A] = 0x2570,
    [RDLEN1_A] = 0x25D0,
    [RDLEN2_A] = 0x25A0,
    [RDLEN3_A] = 0x2570,
    [SRRCTL1_A] = 0x25D0,
    [SRRCTL2_A] = 0x25A0,
    [SRRCTL3_A] = 0x2570,
    [RDH1_A] = 0x25D0,
    [RDH2_A] = 0x25A0,
    [RDH3_A] = 0x2570,
    [RDT1_A] = 0x25D0,
    [RDT2_A] = 0x25A0,
    [RDT3_A] = 0x2570,
    [RXDCTL1_A] = 0x25D0,
    [RXDCTL2_A] = 0x25A0,
    [RXDCTL3_A] = 0x2570,
    [RXCTL1_A] = 0x25D0,
    [RXCTL2_A] = 0x25A0,
    [RXCTL3_A] = 0x2570,
    [RQDPC1_A] = 0x25D0,
    [RQDPC2_A] = 0x25A0,
    [RQDPC3_A] = 0x2570,
    [TDBAL0_A] = 0x2A00,
    [TDBAH0_A] = 0x2A00,
    [TDLEN0_A] = 0x2A00,
    [TDH0_A] = 0x2A00,
    [TDT0_A] = 0x2A00,
    [TXCTL0_A] = 0x2A00,
    [TDWBAL0_A] = 0x2A00,
    [TDWBAH0_A] = 0x2A00,
    [TDBAL1_A] = 0x29D0,
    [TDBAL2_A] = 0x29A0,
    [TDBAL3_A] = 0x2970,
    [TDBAH1_A] = 0x29D0,
    [TDBAH2_A] = 0x29A0,
    [TDBAH3_A] = 0x2970,
    [TDLEN1_A] = 0x29D0,
    [TDLEN2_A] = 0x29A0,
    [TDLEN3_A] = 0x2970,
    [TDH1_A] = 0x29D0,
    [TDH2_A] = 0x29A0,
    [TDH3_A] = 0x2970,
    [TDT1_A] = 0x29D0,
    [TDT2_A] = 0x29A0,
    [TDT3_A] = 0x2970,
    [TXDCTL0_A] = 0x2A00,
    [TXDCTL1_A] = 0x29D0,
    [TXDCTL2_A] = 0x29A0,
    [TXDCTL3_A] = 0x2970,
    [TXCTL1_A] = 0x29D0,
    [TXCTL2_A] = 0x29A0,
    [TXCTL3_A] = 0x2970,
    [TDWBAL1_A] = 0x29D0,
    [TDWBAL2_A] = 0x29A0,
    [TDWBAL3_A] = 0x2970,
    [TDWBAH1_A] = 0x29D0,
    [TDWBAH2_A] = 0x29A0,
    [TDWBAH3_A] = 0x2970,

    /* Access options */
    [RDFH]  = MAC_ACCESS_PARTIAL, [RDFT]  = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH]  = MAC_ACCESS_PARTIAL, [TDFT]  = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL, [EECD]  = MAC_ACCESS_PARTIAL,
    [FLA]   = MAC_ACCESS_PARTIAL,
    [FCAL]  = MAC_ACCESS_PARTIAL, [FCAH]  = MAC_ACCESS_PARTIAL,
    [FCT]   = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};
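
/*
 * MMIO dispatch: a guest access is converted to a register index
 * (byte offset >> 2), redirected through mac_reg_access[] when it
 * names an alias, and then routed via the read/write handler tables.
 * Writes to read-only or unknown registers and reads of unknown
 * registers are only traced; reads of unknown registers return 0.
 */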
void
igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        igb_macreg_writeops[index](core, index, val);
    } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}

uint64_t
igb_core_read(IGBCore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = igb_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }

    return 0;
}
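
/*
 * Pause/resume hooks for the auto-negotiation timer, driven by the VM
 * state change handler below: a negotiation that was still pending
 * when the VM stopped is restarted on resume, so the link can still
 * come up (the timer completes it roughly 500 ms later).
 */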
static inline void
igb_autoneg_pause(IGBCore *core)
{
    timer_del(core->autoneg_timer);
}

static void
igb_autoneg_resume(IGBCore *core)
{
    if (igb_have_autoneg(core) &&
        !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void
igb_vm_state_change(void *opaque, bool running, RunState state)
{
    IGBCore *core = opaque;

    if (running) {
        trace_e1000e_vm_state_running();
        igb_intrmgr_resume(core);
        igb_autoneg_resume(core);
    } else {
        trace_e1000e_vm_state_stopped();
        igb_autoneg_pause(core);
        igb_intrmgr_pause(core);
    }
}
void
igb_core_pci_realize(IGBCore        *core,
                     const uint16_t *eeprom_templ,
                     uint32_t        eeprom_size,
                     const uint8_t  *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       igb_autoneg_timer, core);
    igb_intrmgr_pci_realize(core);

    core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
    }

    net_rx_pkt_init(&core->rx_pkt);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    igb_update_rx_offloads(core);
}

void
igb_core_pci_uninit(IGBCore *core)
{
    int i;

    timer_free(core->autoneg_timer);

    igb_intrmgr_pci_unint(core);

    qemu_del_vm_change_state_handler(core->vmstate);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}
static const uint16_t
igb_phy_reg_init[] = {
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD        |
                 MII_BMCR_AUTOEN,

    [MII_BMSR] = MII_BMSR_EXTCAP   |
                 MII_BMSR_LINK_ST  |
                 MII_BMSR_AUTONEG  |
                 MII_BMSR_MFPS     |
                 MII_BMSR_EXTSTAT  |
                 MII_BMSR_10T_HD   |
                 MII_BMSR_10T_FD   |
                 MII_BMSR_100TX_HD |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1]   = IGP03E1000_E_PHY_ID >> 16,
    [MII_PHYID2]   = (IGP03E1000_E_PHY_ID & 0xfff0) | 1,
    [MII_ANAR]     = MII_ANAR_CSMACD | MII_ANAR_10 |
                     MII_ANAR_10FD | MII_ANAR_TX |
                     MII_ANAR_TXFD | MII_ANAR_PAUSE |
                     MII_ANAR_PAUSE_ASYM,
    [MII_ANLPAR]   = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                     MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                     MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
    [MII_ANER]     = MII_ANER_NP | MII_ANER_NWAY,
    [MII_ANNP]     = 0x1 | MII_ANNP_MP,
    [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                     MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
    [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
                     MII_STAT1000_ROK | MII_STAT1000_LOK,
    [MII_EXTSTAT]  = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

    [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8),
    [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS,
    [IGP02E1000_PHY_POWER_MGMT]  = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU |
                                   IGP01E1000_PSCFR_SMART_SPEED
};
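
/*
 * Power-on defaults for the MAC registers. igb_reset() below copies
 * this table into core->mac; registers past the end of the table
 * reset to zero.
 */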
static const uint32_t igb_mac_reg_init[] = {
    [LEDCTL]   = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
    [EEMNGCTL] = BIT(31),
    [TXDCTL0]  = E1000_TXDCTL_QUEUE_ENABLE,
    [RXDCTL0]  = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
    [RXDCTL1]  = 1 << 16,
    [RXDCTL2]  = 1 << 16,
    [RXDCTL3]  = 1 << 16,
    [RXDCTL4]  = 1 << 16,
    [RXDCTL5]  = 1 << 16,
    [RXDCTL6]  = 1 << 16,
    [RXDCTL7]  = 1 << 16,
    [RXDCTL8]  = 1 << 16,
    [RXDCTL9]  = 1 << 16,
    [RXDCTL10] = 1 << 16,
    [RXDCTL11] = 1 << 16,
    [RXDCTL12] = 1 << 16,
    [RXDCTL13] = 1 << 16,
    [RXDCTL14] = 1 << 16,
    [RXDCTL15] = 1 << 16,
    [TIPG]     = 0x08 | (0x04 << 10) | (0x06 << 20),
    [CTRL]     = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 |
                 E1000_CTRL_ADVD3WUC,
    [STATUS]   = E1000_STATUS_PHYRA | BIT(31),
    [EECD]     = E1000_EECD_FWE_DIS | E1000_EECD_PRES |
                 (2 << E1000_EECD_SIZE_EX_SHIFT),
    [GCR]      = E1000_L0S_ADJUST |
                 E1000_GCR_CMPL_TMOUT_RESEND |
                 E1000_GCR_CAP_VER2 |
                 E1000_L1_ENTRY_LATENCY_MSB |
                 E1000_L1_ENTRY_LATENCY_LSB,
    [RXCSUM]   = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [TXPBS]    = 0x28,
    [RXPBS]    = 0x40,
    [TCTL]     = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) |
                 (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28),
    [TCTL_EXT] = 0x40 | (0x42 << 10),
    [DTXCTL]   = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT,
    [VET]      = ETH_P_VLAN | (ETH_P_VLAN << 16),

    [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] = E1000_V2PMAILBOX_RSTI,
    [MBVFIMR]  = 0xFF,
    [VFRE]     = 0xFF,
    [VFTE]     = 0xFF,
    [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC,
    [RPLOLR]   = E1000_RPLOLR_STRCRC,
    [RLPML]    = 0x2600,
    [TXCTL0]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL1]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL2]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL3]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL4]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL5]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL6]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL7]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL8]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL9]   = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL10]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL11]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL12]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL13]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL14]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL15]  = E1000_DCA_TXCTRL_DATA_RRO_EN |
                 E1000_DCA_TXCTRL_TX_WB_RO_EN |
                 E1000_DCA_TXCTRL_DESC_RRO_EN,
};

static void igb_reset(IGBCore *core, bool sw)
{
    struct igb_tx *tx;
    int i;

    timer_del(core->autoneg_timer);

    igb_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init);
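
    /*
     * A software reset preserves the packet buffer sizing registers
     * (RXPBS/TXPBS) and the EITR interrupt moderation registers;
     * everything else is restored from igb_mac_reg_init (or zeroed
     * past the end of that table).
     */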
    for (i = 0; i < E1000E_MAC_SIZE; i++) {
        if (sw &&
            (i == RXPBS || i == TXPBS ||
             (i >= EITR0 && i < EITR0 + IGB_INTR_NUM))) {
            continue;
        }

        core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ?
                       igb_mac_reg_init[i] : 0;
    }

    if (qemu_get_queue(core->owner_nic)->link_down) {
        igb_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
        /* Set RSTI, so VF can identify a PF reset is in progress */
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI;
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        tx = &core->tx[i];
        memset(tx->ctx, 0, sizeof(tx->ctx));
        tx->first = true;
        tx->skip_cp = false;
    }
}

void
igb_core_reset(IGBCore *core)
{
    igb_reset(core, false);
}

void igb_core_pre_save(IGBCore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && igb_have_autoneg(core)) {
        core->phy[MII_BMSR] |= MII_BMSR_AN_COMP;
        igb_update_flowctl_status(core);
    }
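
    /*
     * TX packets that are only partially assembled cannot be carried
     * in the migration stream; flag them so the remaining descriptors
     * of each such packet are skipped rather than transmitted after
     * load.
     */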
    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
igb_core_post_load(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down according
     * to link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    return 0;
}