/*
 * Core code for QEMU e1000e emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "net/net.h"
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "e1000_common.h"
#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"

/* No more than 7813 interrupts per second according to spec 10.2.4.2 */
#define E1000E_MIN_XITR     (500)

#define E1000E_MAX_TX_FRAGS (64)
union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_rx_desc_extended extended;
    union e1000_rx_desc_packet_split packet_split;
};

static ssize_t
e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
                        bool has_vnet);

static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);

static void e1000e_reset(E1000ECore *core, bool sw);
static inline void
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
    if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
        trace_e1000e_wrn_no_ts_support();
    }
}

static inline void
e1000e_process_snap_option(E1000ECore *core, uint32_t cmd_and_length)
{
    if (cmd_and_length & E1000_TXD_CMD_SNAP) {
        trace_e1000e_wrn_no_snap_support();
    }
}

static inline void
e1000e_raise_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
e1000e_lower_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}
static inline void
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                                 timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}
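/*
 * Note: the delay timers model the 82574 interrupt moderation registers
 * (RDTR/RADV/RAID/TIDV/TADV) and the ITR/EITR throttling registers. The
 * delay in nanoseconds is the guest-programmed register value scaled by
 * delay_resolution_ns, and pending causes are only delivered once the
 * QEMU_CLOCK_VIRTUAL timer armed here fires.
 */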
static void
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        e1000e_intrmgr_rearm_timer(timer);
    }
}

static void
e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
    }
}

static inline void
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
        timer->running = false;
    }
}

static void
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
    trace_e1000e_irq_fire_delayed_interrupts();
    e1000e_set_interrupt_cause(core, 0);
}

static void
e1000e_intrmgr_on_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);

    timer->running = false;
    e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}

static void
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    timer->running = false;

    if (timer->core->mac[IMS] & timer->core->mac[ICR]) {
        if (msi_enabled(timer->core->owner)) {
            trace_e1000e_irq_msi_notify_postponed();
            msi_notify(timer->core->owner, 0);
        } else {
            trace_e1000e_irq_legacy_notify_postponed();
            e1000e_raise_legacy_irq(timer->core);
        }
    }
}

static void
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    timer->running = false;

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    msix_notify(timer->core->owner, idx);
}
static void
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
    int i;

    core->radv.delay_reg = RADV;
    core->rdtr.delay_reg = RDTR;
    core->raid.delay_reg = RAID;
    core->tadv.delay_reg = TADV;
    core->tidv.delay_reg = TIDV;

    core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;

    core->radv.core = core;
    core->rdtr.core = core;
    core->raid.core = core;
    core->tadv.core = core;
    core->tidv.core = core;

    core->itr.core = core;
    core->itr.delay_reg = ITR;
    core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
    }

    if (!create) {
        return;
    }

    core->radv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
    core->rdtr.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
    core->raid.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);

    core->tadv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
    core->tidv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);

    core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   e1000e_intrmgr_on_throttling_timer,
                                   &core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL,
                         e1000e_intrmgr_on_msix_throttling_timer,
                         &core->eitr[i]);
    }
}
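/*
 * Note that delay_reg holds a register index into core->mac (byte offset
 * divided by 4), which is why EITR + i addresses the i-th per-vector
 * throttling register and why the trace calls shift the index left by 2
 * to report the register offset.
 */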
static void
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
    e1000e_intrmgr_stop_timer(&core->radv);
    e1000e_intrmgr_stop_timer(&core->rdtr);
    e1000e_intrmgr_stop_timer(&core->raid);
    e1000e_intrmgr_stop_timer(&core->tidv);
    e1000e_intrmgr_stop_timer(&core->tadv);
}
static bool
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
    uint32_t delayable_causes;
    uint32_t rdtr = core->mac[RDTR];
    uint32_t radv = core->mac[RADV];
    uint32_t raid = core->mac[RAID];

    if (msix_enabled(core->owner)) {
        return false;
    }

    delayable_causes = E1000_ICR_RXQ0 |
                       E1000_ICR_RXQ1 |
                       E1000_ICR_RXT0;

    if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
        delayable_causes |= E1000_ICR_ACK;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /*
     * Check if delayed RX interrupts disabled by client
     * or if there are causes that cannot be delayed
     */
    if ((rdtr == 0) || (*causes != 0)) {
        return false;
    }

    /*
     * Check if delayed RX ACK interrupts disabled by client
     * and there is an ACK packet received
     */
    if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->rdtr);

    if (!core->radv.running && (radv != 0)) {
        e1000e_intrmgr_rearm_timer(&core->radv);
    }

    if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
        e1000e_intrmgr_rearm_timer(&core->raid);
    }

    return true;
}

static bool
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
    static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
                                             E1000_ICR_TXQ1 |
                                             E1000_ICR_TXQE |
                                             E1000_ICR_TXDW;

    if (msix_enabled(core->owner)) {
        return false;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /* If there are causes that cannot be delayed */
    if (*causes != 0) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->tidv);

    if (!core->tadv.running && (core->mac[TADV] != 0)) {
        e1000e_intrmgr_rearm_timer(&core->tadv);
    }

    return true;
}
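/*
 * Both helpers above return true only when every requested cause could be
 * delayed; the caller then skips raising an interrupt and relies on the
 * armed delay timers to deliver the accumulated causes later.
 */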
static uint32_t
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
    uint32_t res;

    if (msix_enabled(core->owner)) {
        assert(core->delayed_causes == 0);
        return 0;
    }

    res = core->delayed_causes;
    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    return res;
}

static void
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
    int i;

    if (core->itr.running) {
        timer_del(core->itr.timer);
        e1000e_intrmgr_on_throttling_timer(&core->itr);
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}
static void
e1000e_intrmgr_resume(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_resume(&core->radv);
    e1000e_intmgr_timer_resume(&core->rdtr);
    e1000e_intmgr_timer_resume(&core->raid);
    e1000e_intmgr_timer_resume(&core->tidv);
    e1000e_intmgr_timer_resume(&core->tadv);

    e1000e_intmgr_timer_resume(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_resume(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pause(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_pause(&core->radv);
    e1000e_intmgr_timer_pause(&core->rdtr);
    e1000e_intmgr_timer_pause(&core->raid);
    e1000e_intmgr_timer_pause(&core->tidv);
    e1000e_intmgr_timer_pause(&core->tadv);

    e1000e_intmgr_timer_pause(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_pause(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_reset(E1000ECore *core)
{
    int i;

    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    e1000e_intrmgr_stop_timer(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intrmgr_stop_timer(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pci_unint(E1000ECore *core)
{
    int i;

    timer_free(core->radv.timer);
    timer_free(core->rdtr.timer);
    timer_free(core->raid.timer);

    timer_free(core->tadv.timer);
    timer_free(core->tidv.timer);

    timer_free(core->itr.timer);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}

static void
e1000e_intrmgr_pci_realize(E1000ECore *core)
{
    e1000e_intrmgr_initialize_all_timers(core, true);
}
static inline bool
e1000e_rx_csum_enabled(E1000ECore *core)
{
    return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
}

static inline bool
e1000e_rx_use_legacy_descriptor(E1000ECore *core)
{
    return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? false : true;
}

static inline bool
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
    return !e1000e_rx_use_legacy_descriptor(core) &&
           (core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}

static inline bool
e1000e_rss_enabled(E1000ECore *core)
{
    return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
           !e1000e_rx_csum_enabled(core) &&
           !e1000e_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint16_t queue;
    uint32_t type;
} E1000E_RSSInfo;
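/*
 * RSS is modelled only for the extended descriptor formats: it requires
 * MRQC to enable multiple receive queues, packet checksum offload to be
 * enabled (RXCSUM.PCSD clear) and RFCTL.EXTEN to select non-legacy
 * descriptors, matching the checks in e1000e_rss_enabled() above.
 */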
static uint32_t
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(e1000e_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * Following two traces must not be combined because resulting
         * event will have 11 arguments totally and some trace backends
         * (at least "ust") have limitation of maximum 10 arguments per
         * event. Events with more arguments fail to compile for
         * backends like these.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }
        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }
    }

    return E1000_MRQ_RSS_TYPE_NONE;
}
static uint32_t
e1000e_rss_calc_hash(E1000ECore *core,
                     struct NetRxPkt *pkt,
                     E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(e1000e_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
        type = NetPktRssIpV6TcpEx;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

static void
e1000e_rss_parse_packet(E1000ECore *core,
                        struct NetRxPkt *pkt,
                        E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

    if (!e1000e_rss_enabled(core)) {
        info->enabled = false;
        info->hash = 0;
        info->queue = 0;
        info->type = 0;
        trace_e1000e_rx_rss_disabled();
        return;
    }

    info->enabled = true;

    info->type = e1000e_rss_get_hash_type(core, pkt);

    trace_e1000e_rx_rss_type(info->type);

    if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
        info->hash = 0;
        info->queue = 0;
        return;
    }

    info->hash = e1000e_rss_calc_hash(core, pkt, info);
    info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
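/*
 * The E1000_RSS_QUEUE macro picks the target queue from the redirection
 * table (RETA) using the low bits of the computed hash, so packets of the
 * same flow are consistently steered to the same receive queue.
 */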
static bool
e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
{
    if (tx->props.tse && tx->cptse) {
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) {
            return false;
        }

        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return true;
    }

    if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) {
            return false;
        }
    }

    if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }

    return true;
}

static void e1000e_tx_pkt_callback(void *core,
                                   const struct iovec *iov,
                                   int iovcnt,
                                   const struct iovec *virt_iov,
                                   int virt_iovcnt)
{
    e1000e_receive_internal(core, virt_iov, virt_iovcnt, true);
}
static bool
e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index)
{
    int target_queue = MIN(core->max_queue_num, queue_index);
    NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);

    if (!e1000e_setup_tx_offloads(core, tx)) {
        return false;
    }

    net_tx_pkt_dump(tx->tx_pkt);

    if ((core->phy[0][MII_BMCR] & MII_BMCR_LOOPBACK) ||
        ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
        return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                      e1000e_tx_pkt_callback, core);
    } else {
        return net_tx_pkt_send(tx->tx_pkt, queue);
    }
}

static void
e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;

    e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
    e1000x_inc_reg_if_not_full(core->mac, TPT);
    e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);

    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPTC);
        break;
    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPTC);
        break;
    case ETH_PKT_UCAST:
        break;
    default:
        g_assert_not_reached();
    }

    e1000x_inc_reg_if_not_full(core->mac, GPTC);
    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);
}
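/*
 * Statistics count the frame as seen on the wire, hence the extra 4 bytes
 * added above for the Ethernet FCS that the emulation never actually
 * transmits.
 */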
static void
e1000e_process_tx_desc(E1000ECore *core,
                       struct e1000e_tx *tx,
                       struct e1000_tx_desc *dp,
                       int queue_index)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    bool eop = txd_lower & E1000_TXD_CMD_EOP;

    if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
        e1000x_read_tx_ctx_descr(xp, &tx->props);
        e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length));
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        /* data descriptor */
        tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
        e1000e_process_ts_option(core, dp);
    } else {
        /* legacy descriptor */
        e1000e_process_ts_option(core, dp);
        tx->cptse = 0;
    }

    addr = le64_to_cpu(dp->buffer_addr);

    if (!tx->skip_cp) {
        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner,
                                             addr, split_size)) {
            tx->skip_cp = true;
        }
    }

    if (eop) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            if (e1000x_vlan_enabled(core->mac) &&
                e1000x_is_vlan_txd(txd_lower)) {
                net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt,
                    le16_to_cpu(dp->upper.fields.special), core->mac[VET]);
            }
            if (e1000e_tx_pkt_send(core, tx, queue_index)) {
                e1000e_on_tx_done_update_stats(core, tx->tx_pkt);
            }
        }

        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);

        tx->sum_needed = 0;
        tx->cptse = 0;
    }
}
static inline uint32_t
e1000e_tx_wb_interrupt_cause(E1000ECore *core, int queue_idx)
{
    if (!msix_enabled(core->owner)) {
        return E1000_ICR_TXDW;
    }

    return (queue_idx == 0) ? E1000_ICR_TXQ0 : E1000_ICR_TXQ1;
}

static inline uint32_t
e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx,
                             bool min_threshold_hit)
{
    if (!msix_enabled(core->owner)) {
        return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0);
    }

    return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1;
}

static uint32_t
e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base,
                        struct e1000_tx_desc *dp, bool *ide, int queue_idx)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & E1000_TXD_CMD_RS) &&
        !(core->mac[IVAR] & E1000_IVAR_TX_INT_EVERY_WB)) {
        return 0;
    }

    *ide = (txd_lower & E1000_TXD_CMD_IDE) ? true : false;

    txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD;

    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return e1000e_tx_wb_interrupt_cause(core, queue_idx);
}
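/*
 * Only the 'upper' dword of the descriptor is written back: it carries the
 * DD (descriptor done) status bit the guest driver polls for, while the
 * rest of the descriptor is owned by the guest and left untouched.
 */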
typedef struct E1000E_RingInfo_st {
    int dbah;
    int dbal;
    int dlen;
    int dh;
    int dt;
    int idx;
} E1000E_RingInfo;

static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dh] == core->mac[r->dt] ||
           core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}

static inline uint64_t
e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r)
{
    uint64_t bah = core->mac[r->dbah];
    uint64_t bal = core->mac[r->dbal];

    return (bah << 32) + bal;
}

static inline uint64_t
e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r)
{
    return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}

static inline void
e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count)
{
    core->mac[r->dh] += count;

    if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
        core->mac[r->dh] = 0;
    }
}
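/*
 * Ring head/tail registers are kept in descriptor units while the length
 * register (dlen) is in bytes, so conversions use E1000_RING_DESC_LEN
 * throughout; e1000e_ring_advance() wraps the head back to zero at the
 * end of the ring.
 */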
static inline uint32_t
e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r)
{
    trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
                                 core->mac[r->dh], core->mac[r->dt]);

    if (core->mac[r->dh] <= core->mac[r->dt]) {
        return core->mac[r->dt] - core->mac[r->dh];
    }

    if (core->mac[r->dh] > core->mac[r->dt]) {
        return core->mac[r->dlen] / E1000_RING_DESC_LEN +
               core->mac[r->dt] - core->mac[r->dh];
    }

    g_assert_not_reached();
    return 0;
}

static inline bool
e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dlen] > 0;
}

static inline uint32_t
e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dlen];
}
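/*
 * The free-descriptor computation above handles the wrapped case (head
 * ahead of tail) by adding the total ring size in descriptors before
 * taking the difference.
 */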
typedef struct E1000E_TxRing_st {
    const E1000E_RingInfo *i;
    struct e1000e_tx *tx;
} E1000E_TxRing;

static inline int
e1000e_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    return (reg_idx - base_reg_idx) / (0x100 >> 2);
}

static inline void
e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx)
{
    static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = {
        { TDBAH,  TDBAL,  TDLEN,  TDH,  TDT,  0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i  = &i[idx];
    txr->tx = &core->tx[idx];
}

typedef struct E1000E_RxRing_st {
    const E1000E_RingInfo *i;
} E1000E_RxRing;

static inline void
e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx)
{
    static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i = &i[idx];
}
static void
e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    bool ide = false;
    const E1000E_RingInfo *txi = txr->i;
    uint32_t cause = E1000_ICS_TXQE;

    if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
        trace_e1000e_tx_disabled();
        return;
    }

    while (!e1000e_ring_empty(core, txi)) {
        base = e1000e_ring_head_descr(core, txi);

        pci_dma_read(core->owner, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.buffer_addr,
                              desc.lower.data, desc.upper.data);

        e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx);
        cause |= e1000e_txdesc_writeback(core, base, &desc, &ide, txi->idx);

        e1000e_ring_advance(core, txi, 1);
    }

    if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) {
        e1000e_set_interrupt_cause(core, cause);
    }

    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);
}
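/*
 * Transmission is fully synchronous with the guest's tail update: each
 * descriptor is fetched, processed and written back in turn until the
 * ring is empty, and the accumulated writeback causes are either raised
 * immediately or handed to the TX delay timers when IDE was requested.
 */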
static bool
e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
                  size_t total_size)
{
    uint32_t bufs = e1000e_ring_free_descr_num(core, r);

    trace_e1000e_rx_has_buffers(r->idx, bufs, total_size,
                                core->rx_desc_buf_size);

    return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
                         core->rx_desc_buf_size;
}

void
e1000e_start_recv(E1000ECore *core)
{
    int i;

    trace_e1000e_rx_start_recv();

    for (i = 0; i <= core->max_queue_num; i++) {
        qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
    }
}

bool
e1000e_can_receive(E1000ECore *core)
{
    int i;

    if (!e1000x_rx_ready(core->owner, core->mac)) {
        return false;
    }

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        E1000E_RxRing rxr;

        e1000e_rx_ring_init(core, &rxr, i);
        if (e1000e_ring_enabled(core, rxr.i) &&
            e1000e_has_rxbufs(core, rxr.i, 1)) {
            trace_e1000e_rx_can_recv();
            return true;
        }
    }

    trace_e1000e_rx_can_recv_rings_full();
    return false;
}
ssize_t
e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000e_receive_iov(core, &iov, 1);
}

static inline bool
e1000e_rx_l3_cso_enabled(E1000ECore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}

static inline bool
e1000e_rx_l4_cso_enabled(E1000ECore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}

static bool
e1000e_receive_filter(E1000ECore *core, const void *buf)
{
    return (!e1000x_is_vlan_packet(buf, core->mac[VET]) ||
            e1000x_rx_vlan_filter(core->mac, PKT_GET_VLAN_HDR(buf))) &&
           e1000x_rx_group_filter(core->mac, buf);
}
static inline void
e1000e_read_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
                          hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->buffer_addr);
}

static inline void
e1000e_read_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
                         hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->read.buffer_addr);
}

static inline void
e1000e_read_ps_rx_descr(E1000ECore *core,
                        union e1000_rx_desc_packet_split *desc,
                        hwaddr buff_addr[MAX_PS_BUFFERS])
{
    int i;

    for (i = 0; i < MAX_PS_BUFFERS; i++) {
        buff_addr[i] = le64_to_cpu(desc->read.buffer_addr[i]);
    }

    trace_e1000e_rx_desc_ps_read(buff_addr[0], buff_addr[1],
                                 buff_addr[2], buff_addr[3]);
}

static void
e1000e_read_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
                     hwaddr buff_addr[MAX_PS_BUFFERS])
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        e1000e_read_lgcy_rx_descr(core, &desc->legacy, &buff_addr[0]);
        buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            e1000e_read_ps_rx_descr(core, &desc->packet_split, buff_addr);
        } else {
            e1000e_read_ext_rx_descr(core, &desc->extended, &buff_addr[0]);
            buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
        }
    }
}
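/*
 * Which of the three descriptor layouts is in use is a global function of
 * RFCTL.EXTEN and RCTL.DTYP: legacy when EXTEN is clear, packet-split when
 * EXTEN is set together with DTYP_PS, and extended otherwise. The same
 * selection logic is mirrored in the write and DMA-writeback paths below.
 */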
static void
e1000e_verify_csum_in_sw(E1000ECore *core,
                         struct NetRxPkt *pkt,
                         uint32_t *status_flags,
                         EthL4HdrProto l4hdr_proto)
{
    bool csum_valid;
    uint32_t csum_error;

    if (e1000e_rx_l3_cso_enabled(core)) {
        if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
            trace_e1000e_rx_metadata_l3_csum_validation_failed();
        } else {
            csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
            *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
        }
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (!e1000e_rx_l4_cso_enabled(core)) {
        trace_e1000e_rx_metadata_l4_cso_disabled();
        return;
    }

    if (l4hdr_proto != ETH_L4_HDR_PROTO_TCP &&
        l4hdr_proto != ETH_L4_HDR_PROTO_UDP) {
        return;
    }

    if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
        trace_e1000e_rx_metadata_l4_csum_validation_failed();
        return;
    }

    csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
    *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;

    if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *status_flags |= E1000_RXD_STAT_UDPCS;
    }
}

static inline bool
e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt)
{
    if (!net_rx_pkt_is_tcp_ack(rx_pkt)) {
        return false;
    }

    if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) {
        return !net_rx_pkt_has_tcp_data(rx_pkt);
    }

    return true;
}
static void
e1000e_build_rx_metadata(E1000ECore *core,
                         struct NetRxPkt *pkt,
                         bool is_eop,
                         const E1000E_RSSInfo *rss_info,
                         uint32_t *rss, uint32_t *mrq,
                         uint32_t *status_flags,
                         uint16_t *ip_id,
                         uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    uint32_t pkt_type;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* Packet parsing results */
    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            *rss = cpu_to_le32(rss_info->hash);
            *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
            trace_e1000e_rx_metadata_rss(*rss, *mrq);
        }
    } else if (hasip4) {
        *status_flags |= E1000_RXD_STAT_IPIDV;
        *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
        trace_e1000e_rx_metadata_ip_id(*ip_id);
    }

    if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && e1000e_is_tcp_ack(core, pkt)) {
        *status_flags |= E1000_RXD_STAT_ACK;
        trace_e1000e_rx_metadata_ack();
    }

    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
        trace_e1000e_rx_metadata_ipv6_filtering_disabled();
        pkt_type = E1000_RXD_PKT_MAC;
    } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
               l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
    } else if (hasip4 || hasip6) {
        pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
    } else {
        pkt_type = E1000_RXD_PKT_MAC;
    }

    *status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);

    /* RX CSO information */
    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        e1000e_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
        goto func_exit;
    }

    if (e1000e_rx_l3_cso_enabled(core)) {
        *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (e1000e_rx_l4_cso_enabled(core)) {
        switch (l4hdr_proto) {
        case ETH_L4_HDR_PROTO_TCP:
            *status_flags |= E1000_RXD_STAT_TCPCS;
            break;

        case ETH_L4_HDR_PROTO_UDP:
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
            break;

        default:
            break;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

func_exit:
    trace_e1000e_rx_metadata_status_flags(*status_flags);
    *status_flags = cpu_to_le32(*status_flags);
}
static inline void
e1000e_write_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
                           struct NetRxPkt *pkt,
                           const E1000E_RSSInfo *rss_info,
                           uint16_t length)
{
    uint32_t status_flags, rss, mrq;
    uint16_t ip_id;

    assert(!rss_info->enabled);

    desc->length = cpu_to_le16(length);
    desc->csum = 0;

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &rss, &mrq,
                             &status_flags, &ip_id,
                             &desc->special);
    desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
    desc->status = (uint8_t) le32_to_cpu(status_flags);
}

static inline void
e1000e_write_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          uint16_t length)
{
    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.upper.length = cpu_to_le16(length);

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &desc->wb.lower.hi_dword.rss,
                             &desc->wb.lower.mrq,
                             &desc->wb.upper.status_error,
                             &desc->wb.lower.hi_dword.csum_ip.ip_id,
                             &desc->wb.upper.vlan);
}

static inline void
e1000e_write_ps_rx_descr(E1000ECore *core,
                         union e1000_rx_desc_packet_split *desc,
                         struct NetRxPkt *pkt,
                         const E1000E_RSSInfo *rss_info,
                         size_t ps_hdr_len,
                         uint16_t(*written)[MAX_PS_BUFFERS])
{
    int i;

    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.middle.length0 = cpu_to_le16((*written)[0]);

    for (i = 0; i < PS_PAGE_BUFFERS; i++) {
        desc->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
    }

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &desc->wb.lower.hi_dword.rss,
                             &desc->wb.lower.mrq,
                             &desc->wb.middle.status_error,
                             &desc->wb.lower.hi_dword.csum_ip.ip_id,
                             &desc->wb.middle.vlan);

    desc->wb.upper.header_status =
        cpu_to_le16(ps_hdr_len | (ps_hdr_len ? E1000_RXDPS_HDRSTAT_HDRSP : 0));

    trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1],
                                  (*written)[2], (*written)[3]);
}
static void
e1000e_write_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
                      struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
                      size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS])
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        assert(ps_hdr_len == 0);
        e1000e_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
                                   (*written)[0]);
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            e1000e_write_ps_rx_descr(core, &desc->packet_split, pkt, rss_info,
                                     ps_hdr_len, written);
        } else {
            assert(ps_hdr_len == 0);
            e1000e_write_ext_rx_descr(core, &desc->extended, pkt, rss_info,
                                      (*written)[0]);
        }
    }
}
static inline void
e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr,
                             union e1000_rx_desc_union *desc, dma_addr_t len)
{
    PCIDevice *dev = core->owner;

    if (e1000e_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = &desc->legacy;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            union e1000_rx_desc_packet_split *d = &desc->packet_split;
            size_t offset = offsetof(union e1000_rx_desc_packet_split,
                                     wb.middle.status_error);
            uint32_t status = d->wb.middle.status_error;

            d->wb.middle.status_error &= ~E1000_RXD_STAT_DD;
            pci_dma_write(dev, addr, desc, len);

            if (status & E1000_RXD_STAT_DD) {
                d->wb.middle.status_error = status;
                pci_dma_write(dev, addr + offset, &status, sizeof(status));
            }
        } else {
            union e1000_rx_desc_extended *d = &desc->extended;
            size_t offset = offsetof(union e1000_rx_desc_extended,
                                     wb.upper.status_error);
            uint32_t status = d->wb.upper.status_error;

            d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
            pci_dma_write(dev, addr, desc, len);

            if (status & E1000_RXD_STAT_DD) {
                d->wb.upper.status_error = status;
                pci_dma_write(dev, addr + offset, &status, sizeof(status));
            }
        }
    }
}
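/*
 * The descriptor is first written with DD cleared and the status field is
 * only stored afterwards, so a guest polling the DD bit can never observe
 * a completed descriptor whose other fields are still stale.
 */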
typedef struct e1000e_ba_state_st {
    uint16_t written[MAX_PS_BUFFERS];
    uint8_t cur_idx;
} e1000e_ba_state;

static inline void
e1000e_write_hdr_to_rx_buffers(E1000ECore *core,
                               hwaddr ba[MAX_PS_BUFFERS],
                               e1000e_ba_state *bastate,
                               const char *data,
                               dma_addr_t data_len)
{
    assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]);

    pci_dma_write(core->owner, ba[0] + bastate->written[0], data, data_len);
    bastate->written[0] += data_len;

    bastate->cur_idx = 1;
}

static void
e1000e_write_to_rx_buffers(E1000ECore *core,
                           hwaddr ba[MAX_PS_BUFFERS],
                           e1000e_ba_state *bastate,
                           const char *data,
                           dma_addr_t data_len)
{
    while (data_len > 0) {
        uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx];
        uint32_t cur_buf_bytes_left = cur_buf_len -
                                      bastate->written[bastate->cur_idx];
        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);

        trace_e1000e_rx_desc_buff_write(bastate->cur_idx,
                                        ba[bastate->cur_idx],
                                        bastate->written[bastate->cur_idx],
                                        data,
                                        bytes_to_write);

        pci_dma_write(core->owner,
                      ba[bastate->cur_idx] + bastate->written[bastate->cur_idx],
                      data, bytes_to_write);

        bastate->written[bastate->cur_idx] += bytes_to_write;
        data += bytes_to_write;
        data_len -= bytes_to_write;

        if (bastate->written[bastate->cur_idx] == cur_buf_len) {
            bastate->cur_idx++;
        }

        assert(bastate->cur_idx < MAX_PS_BUFFERS);
    }
}
static void
e1000e_update_rx_stats(E1000ECore *core, size_t pkt_size, size_t pkt_fcs_size)
{
    eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt);
    e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size);
}

static inline bool
e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi)
{
    return e1000e_ring_free_descr_num(core, rxi) ==
           e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift;
}

static bool
e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    bool fragment;

    if (!e1000e_rx_use_ps_descriptor(core)) {
        return false;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
    } else if (hasip6) {
        fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
    } else {
        return false;
    }

    if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
        return false;
    }

    if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
        l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
    } else {
        *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
    }

    if ((*hdr_len > core->rxbuf_sizes[0]) ||
        (*hdr_len > net_rx_pkt_get_total_len(pkt))) {
        return false;
    }

    return true;
}
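/*
 * Packet-split is refused for fragmented packets when RFCTL.IPFRSP_DIS is
 * set and whenever the L4 (or L5) header would not fit into buffer 0; in
 * those cases the whole packet is written sequentially instead.
 */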
static void
e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
                             const E1000E_RxRing *rxr,
                             const E1000E_RSSInfo *rss_info)
{
    PCIDevice *d = core->owner;
    dma_addr_t base;
    union e1000_rx_desc_union desc;
    size_t desc_size;
    size_t desc_offset = 0;
    size_t iov_ofs = 0;

    struct iovec *iov = net_rx_pkt_get_iovec(pkt);
    size_t size = net_rx_pkt_get_total_len(pkt);
    size_t total_size = size + e1000x_fcs_len(core->mac);
    const E1000E_RingInfo *rxi;
    size_t ps_hdr_len = 0;
    bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len);
    bool is_first = true;

    rxi = rxr->i;

    do {
        hwaddr ba[MAX_PS_BUFFERS];
        e1000e_ba_state bastate = { { 0 } };
        bool is_last = false;

        desc_size = total_size - desc_offset;

        if (desc_size > core->rx_desc_buf_size) {
            desc_size = core->rx_desc_buf_size;
        }

        if (e1000e_ring_empty(core, rxi)) {
            return;
        }

        base = e1000e_ring_head_descr(core, rxi);

        pci_dma_read(d, base, &desc, core->rx_desc_len);

        trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len);

        e1000e_read_rx_descr(core, &desc, ba);

        if (ba[0]) {
            if (desc_offset < size) {
                static const uint32_t fcs_pad;
                size_t iov_copy;
                size_t copy_size = size - desc_offset;
                if (copy_size > core->rx_desc_buf_size) {
                    copy_size = core->rx_desc_buf_size;
                }

                /* For PS mode copy the packet header first */
                if (do_ps) {
                    if (is_first) {
                        size_t ps_hdr_copied = 0;
                        do {
                            iov_copy = MIN(ps_hdr_len - ps_hdr_copied,
                                           iov->iov_len - iov_ofs);

                            e1000e_write_hdr_to_rx_buffers(core, ba, &bastate,
                                                           iov->iov_base,
                                                           iov_copy);

                            copy_size -= iov_copy;
                            ps_hdr_copied += iov_copy;

                            iov_ofs += iov_copy;
                            if (iov_ofs == iov->iov_len) {
                                iov++;
                                iov_ofs = 0;
                            }
                        } while (ps_hdr_copied < ps_hdr_len);

                        is_first = false;
                    } else {
                        /* Leave buffer 0 of each descriptor except first */
                        /* empty as per spec 7.1.5.1                      */
                        e1000e_write_hdr_to_rx_buffers(core, ba, &bastate,
                                                       NULL, 0);
                    }
                }

                /* Copy packet payload */
                while (copy_size) {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);

                    e1000e_write_to_rx_buffers(core, ba, &bastate,
                                            iov->iov_base + iov_ofs, iov_copy);

                    copy_size -= iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                }

                if (desc_offset + desc_size >= total_size) {
                    /* Simulate FCS checksum presence in the last descriptor */
                    e1000e_write_to_rx_buffers(core, ba, &bastate,
                          (const char *) &fcs_pad, e1000x_fcs_len(core->mac));
                }
            }
        } else { /* as per intel docs; skip descriptors with null buf addr */
            trace_e1000e_rx_null_descriptor();
        }
        desc_offset += desc_size;
        if (desc_offset >= total_size) {
            is_last = true;
        }

        e1000e_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL,
                           rss_info, do_ps ? ps_hdr_len : 0, &bastate.written);
        e1000e_pci_dma_write_rx_desc(core, base, &desc, core->rx_desc_len);

        e1000e_ring_advance(core, rxi,
                            core->rx_desc_len / E1000_MIN_RX_DESC_LEN);

    } while (desc_offset < total_size);

    e1000e_update_rx_stats(core, size, total_size);
}
static inline void
e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
{
    struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);

    if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_rx_pkt_fix_l4_csum(pkt);
    }
}

ssize_t
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
{
    return e1000e_receive_internal(core, iov, iovcnt, core->has_vnet);
}
static ssize_t
e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
                        bool has_vnet)
{
    uint32_t causes = 0;
    uint8_t buf[ETH_ZLEN];
    struct iovec min_iov;
    size_t size, orig_size;
    size_t iov_ofs = 0;
    E1000E_RxRing rxr;
    E1000E_RSSInfo rss_info;
    size_t total_size;
    bool rdmts_hit;

    trace_e1000e_rx_receive_iov(iovcnt);

    if (!e1000x_hw_rx_enabled(core->mac)) {
        return -1;
    }

    /* Pull virtio header in */
    if (has_vnet) {
        net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
        iov_ofs = sizeof(struct virtio_net_hdr);
    } else {
        net_rx_pkt_unset_vhdr(core->rx_pkt);
    }

    orig_size = iov_size(iov, iovcnt);
    size = orig_size - iov_ofs;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(buf)) {
        iov_to_buf(iov, iovcnt, iov_ofs, buf, size);
        memset(&buf[size], 0, sizeof(buf) - size);
        e1000x_inc_reg_if_not_full(core->mac, RUC);
        min_iov.iov_base = buf;
        min_iov.iov_len = size = sizeof(buf);
        iovcnt = 1;
        iov = &min_iov;
        iov_ofs = 0;
    } else {
        iov_to_buf(iov, iovcnt, iov_ofs, buf, ETH_HLEN + 4);
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(core->mac, size)) {
        return orig_size;
    }

    net_rx_pkt_set_packet_type(core->rx_pkt,
                               get_eth_packet_type(PKT_GET_ETH_HDR(buf)));

    if (!e1000e_receive_filter(core, buf)) {
        trace_e1000e_rx_flt_dropped();
        return orig_size;
    }

    net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
                               e1000x_vlan_enabled(core->mac) ? 0 : -1,
                               core->mac[VET], 0);

    e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info);
    e1000e_rx_ring_init(core, &rxr, rss_info.queue);

    total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
        e1000x_fcs_len(core->mac);

    if (e1000e_has_rxbufs(core, rxr.i, total_size)) {
        e1000e_rx_fix_l4_csum(core, core->rx_pkt);

        e1000e_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);

        /* Perform small receive detection (RSRPD) */
        if (total_size < core->mac[RSRPD]) {
            causes |= E1000_ICS_SRPD;
        }

        /* Perform ACK receive detection */
        if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) &&
            (e1000e_is_tcp_ack(core, core->rx_pkt))) {
            causes |= E1000_ICS_ACK;
        }

        /* Check if receive descriptor minimum threshold hit */
        rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i);
        causes |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit);

        trace_e1000e_rx_written_to_guest(rxr.i->idx);
    } else {
        causes |= E1000_ICS_RXO;

        trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
    }

    if (!e1000e_intrmgr_delay_rx_causes(core, &causes)) {
        trace_e1000e_rx_interrupt_set(causes);
        e1000e_set_interrupt_cause(core, causes);
    } else {
        trace_e1000e_rx_interrupt_delayed(causes);
    }

    return orig_size;
}
static inline bool
e1000e_have_autoneg(E1000ECore *core)
{
    return core->phy[0][MII_BMCR] & MII_BMCR_AUTOEN;
}

static void e1000e_update_flowctl_status(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP) {
        trace_e1000e_link_autoneg_flowctl(true);
        core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
    } else {
        trace_e1000e_link_autoneg_flowctl(false);
    }
}

static inline void
e1000e_link_down(E1000ECore *core)
{
    e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
    e1000e_update_flowctl_status(core);
}

static inline void
e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
    core->phy[0][MII_BMCR] = val & ~(0x3f |
                                     MII_BMCR_RESET |
                                     MII_BMCR_ANRESTART);

    if ((val & MII_BMCR_ANRESTART) &&
        e1000e_have_autoneg(core)) {
        e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
    }
}

static void
e1000e_set_phy_oem_bits(E1000ECore *core, int index, uint16_t val)
{
    core->phy[0][PHY_OEM_BITS] = val & ~BIT(10);

    if (val & BIT(10)) {
        e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
    }
}

static void
e1000e_set_phy_page(E1000ECore *core, int index, uint16_t val)
{
    core->phy[0][PHY_PAGE] = val & PHY_PAGE_RW_MASK;
}
void
e1000e_core_set_link_status(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);
    uint32_t old_status = core->mac[STATUS];

    trace_e1000e_link_status_changed(nc->link_down ? false : true);

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
    } else {
        if (e1000e_have_autoneg(core) &&
            !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) {
            e1000x_restart_autoneg(core->mac, core->phy[0],
                                   core->autoneg_timer);
        } else {
            e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
            e1000e_start_recv(core);
        }
    }

    if (core->mac[STATUS] != old_status) {
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}
static void
e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_core_ctrl_write(index, val);

    /* RST is self clearing */
    core->mac[CTRL] = val & ~E1000_CTRL_RST;
    core->mac[CTRL_DUP] = core->mac[CTRL];

    trace_e1000e_link_set_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    if (val & E1000_CTRL_RST) {
        trace_e1000e_core_ctrl_sw_reset();
        e1000e_reset(core, true);
    }

    if (val & E1000_CTRL_PHY_RST) {
        trace_e1000e_core_ctrl_phy_reset();
        core->mac[STATUS] |= E1000_STATUS_PHYRA;
    }
}

static void
e1000e_set_rfctl(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_rx_set_rfctl(val);

    if (!(val & E1000_RFCTL_ISCSI_DIS)) {
        trace_e1000e_wrn_iscsi_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSW_DIS)) {
        trace_e1000e_wrn_nfsw_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSR_DIS)) {
        trace_e1000e_wrn_nfsr_filtering_not_supported();
    }

    core->mac[RFCTL] = val;
}
static void
e1000e_calc_per_desc_buf_size(E1000ECore *core)
{
    int i;
    core->rx_desc_buf_size = 0;

    for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
        core->rx_desc_buf_size += core->rxbuf_sizes[i];
    }
}

static void
e1000e_parse_rxbufsize(E1000ECore *core)
{
    uint32_t rctl = core->mac[RCTL];

    memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes));

    if (rctl & E1000_RCTL_DTYP_MASK) {
        uint32_t bsize;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK;
        core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK;
        core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK;
        core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK;
        core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024;
    } else if (rctl & E1000_RCTL_FLXBUF_MASK) {
        int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK;
        core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024;
    } else {
        core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl);
    }

    trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1],
                                    core->rxbuf_sizes[2], core->rxbuf_sizes[3]);

    e1000e_calc_per_desc_buf_size(core);
}
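/*
 * Example (hypothetical PSRCTL programming): with the BSIZE0 field set to
 * 2 and BSIZE1..3 set to 1, the per-descriptor buffers become 256, 1024,
 * 1024 and 1024 bytes, giving a total rx_desc_buf_size of 3328 bytes for
 * packet-split descriptors.
 */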
static void
e1000e_calc_rxdesclen(E1000ECore *core)
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        core->rx_desc_len = sizeof(struct e1000_rx_desc);
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split);
        } else {
            core->rx_desc_len = sizeof(union e1000_rx_desc_extended);
        }
    }
    trace_e1000e_rx_desc_len(core->rx_desc_len);
}

static void
e1000e_set_rx_control(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RCTL] = val;
    trace_e1000e_rx_set_rctl(core->mac[RCTL]);

    if (val & E1000_RCTL_EN) {
        e1000e_parse_rxbufsize(core);
        e1000e_calc_rxdesclen(core);
        core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1 +
                                E1000_RING_DESC_LEN_SHIFT;

        e1000e_start_recv(core);
    }
}
static
void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE])
(E1000ECore *, int, uint16_t) = {
    [0] = {
        [MII_BMCR]     = e1000e_set_phy_ctrl,
        [PHY_PAGE]     = e1000e_set_phy_page,
        [PHY_OEM_BITS] = e1000e_set_phy_oem_bits
    }
};

static inline bool
e1000e_postpone_interrupt(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);

        return true;
    }

    if (timer->core->mac[timer->delay_reg] != 0) {
        e1000e_intrmgr_rearm_timer(timer);
    }

    return false;
}

static inline bool
e1000e_itr_should_postpone(E1000ECore *core)
{
    return e1000e_postpone_interrupt(&core->itr);
}

static inline bool
e1000e_eitr_should_postpone(E1000ECore *core, int idx)
{
    return e1000e_postpone_interrupt(&core->eitr[idx]);
}
static void
e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
    uint32_t effective_eiac;

    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
        if (vec < E1000E_MSIX_VEC_NUM) {
            if (!e1000e_eitr_should_postpone(core, vec)) {
                trace_e1000e_irq_msix_notify_vec(vec);
                msix_notify(core->owner, vec);
            }
        } else {
            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
        }
    } else {
        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
    }

    if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) {
        trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause);
        core->mac[IAM] &= ~cause;
    }

    trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]);

    effective_eiac = core->mac[EIAC] & cause;

    core->mac[ICR] &= ~effective_eiac;

    if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        core->mac[IMS] &= ~effective_eiac;
    }
}

static void
e1000e_msix_notify(E1000ECore *core, uint32_t causes)
{
    if (causes & E1000_ICR_RXQ0) {
        e1000e_msix_notify_one(core, E1000_ICR_RXQ0,
                               E1000_IVAR_RXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_RXQ1) {
        e1000e_msix_notify_one(core, E1000_ICR_RXQ1,
                               E1000_IVAR_RXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ0) {
        e1000e_msix_notify_one(core, E1000_ICR_TXQ0,
                               E1000_IVAR_TXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ1) {
        e1000e_msix_notify_one(core, E1000_ICR_TXQ1,
                               E1000_IVAR_TXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_OTHER) {
        e1000e_msix_notify_one(core, E1000_ICR_OTHER,
                               E1000_IVAR_OTHER(core->mac[IVAR]));
    }
}
static void
e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
        if (vec < E1000E_MSIX_VEC_NUM) {
            trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec);
            msix_clr_pending(core->owner, vec);
        } else {
            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
        }
    } else {
        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
    }
}

static void
e1000e_msix_clear(E1000ECore *core, uint32_t causes)
{
    if (causes & E1000_ICR_RXQ0) {
        e1000e_msix_clear_one(core, E1000_ICR_RXQ0,
                              E1000_IVAR_RXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_RXQ1) {
        e1000e_msix_clear_one(core, E1000_ICR_RXQ1,
                              E1000_IVAR_RXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ0) {
        e1000e_msix_clear_one(core, E1000_ICR_TXQ0,
                              E1000_IVAR_TXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ1) {
        e1000e_msix_clear_one(core, E1000_ICR_TXQ1,
                              E1000_IVAR_TXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_OTHER) {
        e1000e_msix_clear_one(core, E1000_ICR_OTHER,
                              E1000_IVAR_OTHER(core->mac[IVAR]));
    }
}
static inline void
e1000e_fix_icr_asserted(E1000ECore *core)
{
    core->mac[ICR] &= ~E1000_ICR_ASSERTED;
    if (core->mac[ICR]) {
        core->mac[ICR] |= E1000_ICR_ASSERTED;
    }

    trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
}

static void e1000e_raise_interrupts(E1000ECore *core,
                                    size_t index, uint32_t causes)
{
    bool is_msix = msix_enabled(core->owner);
    uint32_t old_causes = core->mac[IMS] & core->mac[ICR];
    uint32_t raised_causes;

    trace_e1000e_irq_set(index << 2,
                         core->mac[index], core->mac[index] | causes);

    core->mac[index] |= causes;

    /* Set ICR[OTHER] for MSI-X */
    if (is_msix) {
        if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) {
            core->mac[ICR] |= E1000_ICR_OTHER;
            trace_e1000e_irq_add_msi_other(core->mac[ICR]);
        }
    }

    e1000e_fix_icr_asserted(core);

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only. However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    core->mac[ICS] = core->mac[ICR];

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    raised_causes = core->mac[IMS] & core->mac[ICR] & ~old_causes;
    if (!raised_causes) {
        return;
    }

    if (is_msix) {
        e1000e_msix_notify(core, raised_causes & ~E1000_ICR_ASSERTED);
    } else if (!e1000e_itr_should_postpone(core)) {
        if (msi_enabled(core->owner)) {
            trace_e1000e_irq_msi_notify(raised_causes);
            msi_notify(core->owner, 0);
        } else {
            e1000e_raise_legacy_irq(core);
        }
    }
}
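/*
 * raised_causes is masked with ~old_causes so that causes which were
 * already pending and unmasked do not trigger a second edge: only newly
 * raised, unmasked causes generate an MSI/MSI-X message or a legacy INTx
 * assertion.
 */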
static void e1000e_lower_interrupts(E1000ECore *core,
                                    size_t index, uint32_t causes)
{
    trace_e1000e_irq_clear(index << 2,
                           core->mac[index], core->mac[index] & ~causes);

    core->mac[index] &= ~causes;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only. However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    core->mac[ICS] = core->mac[ICR];

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (!(core->mac[IMS] & core->mac[ICR]) &&
        !msix_enabled(core->owner) && !msi_enabled(core->owner)) {
        e1000e_lower_legacy_irq(core);
    }
}

static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val)
{
    val |= e1000e_intmgr_collect_delayed_causes(core);
    e1000e_raise_interrupts(core, ICR, val);
}
static void
e1000e_autoneg_timer(void *opaque)
{
    E1000ECore *core = opaque;
    if (!qemu_get_queue(core->owner_nic)->link_down) {
        e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
        e1000e_start_recv(core);

        e1000e_update_flowctl_status(core);
        /* signal link status change to the guest */
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}

static inline uint16_t
e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
    uint16_t index = (addr & 0x1ffff) >> 2;
    return index + (mac_reg_access[index] & 0xfffe);
}
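/*
 * mac_reg_access[] can hold an even displacement for aliased register
 * addresses: the low bit appears to be reserved for access flags, and the
 * remaining bits are added to the decoded index so that aliases share one
 * backing slot in core->mac.
 */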
2228 static const char e1000e_phy_regcap
[E1000E_PHY_PAGES
][0x20] = {
2230 [MII_BMCR
] = PHY_ANYPAGE
| PHY_RW
,
2231 [MII_BMSR
] = PHY_ANYPAGE
| PHY_R
,
2232 [MII_PHYID1
] = PHY_ANYPAGE
| PHY_R
,
2233 [MII_PHYID2
] = PHY_ANYPAGE
| PHY_R
,
2234 [MII_ANAR
] = PHY_ANYPAGE
| PHY_RW
,
2235 [MII_ANLPAR
] = PHY_ANYPAGE
| PHY_R
,
2236 [MII_ANER
] = PHY_ANYPAGE
| PHY_R
,
2237 [MII_ANNP
] = PHY_ANYPAGE
| PHY_RW
,
2238 [MII_ANLPRNP
] = PHY_ANYPAGE
| PHY_R
,
2239 [MII_CTRL1000
] = PHY_ANYPAGE
| PHY_RW
,
2240 [MII_STAT1000
] = PHY_ANYPAGE
| PHY_R
,
2241 [MII_EXTSTAT
] = PHY_ANYPAGE
| PHY_R
,
2242 [PHY_PAGE
] = PHY_ANYPAGE
| PHY_RW
,
2244 [PHY_COPPER_CTRL1
] = PHY_RW
,
2245 [PHY_COPPER_STAT1
] = PHY_R
,
2246 [PHY_COPPER_CTRL3
] = PHY_RW
,
2247 [PHY_RX_ERR_CNTR
] = PHY_R
,
2248 [PHY_OEM_BITS
] = PHY_RW
,
2249 [PHY_BIAS_1
] = PHY_RW
,
2250 [PHY_BIAS_2
] = PHY_RW
,
2251 [PHY_COPPER_INT_ENABLE
] = PHY_RW
,
2252 [PHY_COPPER_STAT2
] = PHY_R
,
2253 [PHY_COPPER_CTRL2
] = PHY_RW
2256 [PHY_MAC_CTRL1
] = PHY_RW
,
2257 [PHY_MAC_INT_ENABLE
] = PHY_RW
,
2258 [PHY_MAC_STAT
] = PHY_R
,
2259 [PHY_MAC_CTRL2
] = PHY_RW
2262 [PHY_LED_03_FUNC_CTRL1
] = PHY_RW
,
2263 [PHY_LED_03_POL_CTRL
] = PHY_RW
,
2264 [PHY_LED_TIMER_CTRL
] = PHY_RW
,
2265 [PHY_LED_45_CTRL
] = PHY_RW
2268 [PHY_1000T_SKEW
] = PHY_R
,
2269 [PHY_1000T_SWAP
] = PHY_R
2272 [PHY_CRC_COUNTERS
] = PHY_R
2277 e1000e_phy_reg_check_cap(E1000ECore
*core
, uint32_t addr
,
2278 char cap
, uint8_t *page
)
2281 (e1000e_phy_regcap
[0][addr
] & PHY_ANYPAGE
) ? 0
2282 : core
->phy
[0][PHY_PAGE
];
2284 if (*page
>= E1000E_PHY_PAGES
) {
2288 return e1000e_phy_regcap
[*page
][addr
] & cap
;
2292 e1000e_phy_reg_write(E1000ECore
*core
, uint8_t page
,
2293 uint32_t addr
, uint16_t data
)
2295 assert(page
< E1000E_PHY_PAGES
);
2296 assert(addr
< E1000E_PHY_PAGE_SIZE
);
2298 if (e1000e_phyreg_writeops
[page
][addr
]) {
2299 e1000e_phyreg_writeops
[page
][addr
](core
, addr
, data
);
2301 core
->phy
[page
][addr
] = data
;
static void
e1000e_set_mdic(E1000ECore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
    uint8_t page;

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_R, &page)) {
            trace_e1000e_core_mdic_read_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            val = (val ^ data) | core->phy[page][addr];
            trace_e1000e_core_mdic_read(page, addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_W, &page)) {
            trace_e1000e_core_mdic_write_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_e1000e_core_mdic_write(page, addr, data);
            e1000e_phy_reg_write(core, page, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        e1000e_set_interrupt_cause(core, E1000_ICR_MDAC);
    }
}
static void
e1000e_set_rdt(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
    trace_e1000e_rx_set_rdt(e1000e_mq_queue_idx(RDT0, index), val);
    e1000e_start_recv(core);
}
static void
e1000e_set_status(E1000ECore *core, int index, uint32_t val)
{
    if ((val & E1000_STATUS_PHYRA) == 0) {
        core->mac[index] &= ~E1000_STATUS_PHYRA;
    }
}
static void
e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
                                     !!(val & E1000_CTRL_EXT_SPD_BYPS));

    /* Zero self-clearing bits */
    val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
    core->mac[CTRL_EXT] = val;
}
static void
e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val)
{
    int i;

    core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;

    if (!msix_enabled(core->owner)) {
        return;
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->mac[PBACLR] & BIT(i)) {
            msix_clr_pending(core->owner, i);
        }
    }
}
static void
e1000e_set_fcrth(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTH] = val & 0xFFF8;
}

static void
e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTL] = val & 0x8000FFF8;
}
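
/*
 * Generate trivial setters that keep only the low <num> bits of the
 * written value; e1000e_set_16bit(), for instance, stores val & 0xffff.
 */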
#define E1000E_LOW_BITS_SET_FUNC(num)                                \
    static void                                                      \
    e1000e_set_##num##bit(E1000ECore *core, int index, uint32_t val) \
    {                                                                \
        core->mac[index] = val & (BIT(num) - 1);                     \
    }

E1000E_LOW_BITS_SET_FUNC(4)
E1000E_LOW_BITS_SET_FUNC(6)
E1000E_LOW_BITS_SET_FUNC(11)
E1000E_LOW_BITS_SET_FUNC(12)
E1000E_LOW_BITS_SET_FUNC(13)
E1000E_LOW_BITS_SET_FUNC(16)
static void
e1000e_set_vet(E1000ECore *core, int index, uint32_t val)
{
    core->mac[VET] = val & 0xffff;
    trace_e1000e_vlan_vet(core->mac[VET]);
}

static void
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDLEN_MASK;
}

static void
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDBAL_MASK;
}
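
/*
 * Writes to TCTL and TDT may unblock transmission: e1000e_set_tctl()
 * kicks both tx rings whose TARC enable bit is set, while
 * e1000e_set_tdt() kicks only the ring owning the written tail.
 */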
static void
e1000e_set_tctl(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    core->mac[index] = val;

    if (core->mac[TARC0] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 0);
        e1000e_start_xmit(core, &txr);
    }

    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 1);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_tdt(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    int qidx = e1000e_mq_queue_idx(TDT, index);
    uint32_t tarc_reg = (qidx == 0) ? TARC0 : TARC1;

    core->mac[index] = val & 0xffff;

    if (core->mac[tarc_reg] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, qidx);
        e1000e_start_xmit(core, &txr);
    }
}
static void
e1000e_set_ics(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    e1000e_set_interrupt_cause(core, val);
}
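
/*
 * A guest write to ICR acknowledges (clears) the written cause bits;
 * with CTRL_EXT.IAME set, the acknowledge also auto-masks IMS using
 * the IAM value.
 */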
static void
e1000e_set_icr(E1000ECore *core, int index, uint32_t val)
{
    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_process_iame();
        e1000e_lower_interrupts(core, IMS, core->mac[IAM]);
    }

    /*
     * The Windows driver expects the "receive overrun" bit and other
     * ones to be cleared when the "Other" bit (#24) is cleared.
     */
    if (val & E1000_ICR_OTHER) {
        val |= E1000_ICR_OTHER_CAUSES;
    }
    e1000e_lower_interrupts(core, ICR, val);
}
static void
e1000e_set_imc(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_ims_clear_set_imc(val);
    e1000e_lower_interrupts(core, IMS, val);
}
static void
e1000e_set_ims(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ims_ext_mask =
        E1000_IMS_RXQ0 | E1000_IMS_RXQ1 |
        E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    static const uint32_t ims_valid_mask =
        E1000_IMS_TXDW   | E1000_IMS_TXQE    | E1000_IMS_LSC  |
        E1000_IMS_RXDMT0 | E1000_IMS_RXO     | E1000_IMS_RXT0 |
        E1000_IMS_MDAC   | E1000_IMS_TXD_LOW | E1000_IMS_SRPD |
        E1000_IMS_ACK    | E1000_IMS_MNG     | E1000_IMS_RXQ0 |
        E1000_IMS_RXQ1   | E1000_IMS_TXQ0    | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    uint32_t valid_val = val & ims_valid_mask;

    if ((valid_val & ims_ext_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) &&
        msix_enabled(core->owner)) {
        e1000e_msix_clear(core, valid_val);
    }

    if ((valid_val == ims_valid_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA)) {
        trace_e1000e_irq_fire_all_timers(val);
        e1000e_intrmgr_fire_all_timers(core);
    }

    e1000e_raise_interrupts(core, IMS, valid_val);
}
static void
e1000e_set_rdtr(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_RDTR_FPD) && (core->rdtr.running)) {
        trace_e1000e_irq_rdtr_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_rdtr_fpd_not_running();
    }
}

static void
e1000e_set_tidv(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_TIDV_FPD) && (core->tidv.running)) {
        trace_e1000e_irq_tidv_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_tidv_fpd_not_running();
    }
}
static uint32_t
e1000e_mac_readreg(E1000ECore *core, int index)
{
    return core->mac[index];
}

static uint32_t
e1000e_mac_ics_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ics(core->mac[ICS]);
    return core->mac[ICS];
}

static uint32_t
e1000e_mac_ims_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ims(core->mac[IMS]);
    return core->mac[IMS];
}
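
/*
 * SWSM.SMBI is a hardware semaphore: a read returns the current value
 * and leaves the busy bit set behind, so only the first reader sees
 * SMBI clear until software releases it.
 */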
static uint32_t
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
    uint32_t val = core->mac[SWSM];
    core->mac[SWSM] = val | E1000_SWSM_SMBI;
    return val;
}

static uint32_t
e1000e_mac_itr_read(E1000ECore *core, int index)
{
    return core->itr_guest_value;
}

static uint32_t
e1000e_mac_eitr_read(E1000ECore *core, int index)
{
    return core->eitr_guest_value[index - EITR];
}
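
/*
 * ICR reads have side effects: depending on IMS, the MSI-X state and
 * CTRL_EXT.IAME, the read may clear the whole register, following the
 * 82574 clear-on-read rules discussed in the comment below.
 */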
static uint32_t
e1000e_mac_icr_read(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[ICR];

    if (core->mac[IMS] == 0) {
        trace_e1000e_irq_icr_clear_zero_ims();
        e1000e_lower_interrupts(core, ICR, 0xffffffff);
    }

    if (!msix_enabled(core->owner)) {
        trace_e1000e_irq_icr_clear_nonmsix_icr_read();
        e1000e_lower_interrupts(core, ICR, 0xffffffff);
    }

    if (core->mac[ICR] & E1000_ICR_ASSERTED) {
        if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME) {
            trace_e1000e_irq_icr_clear_iame();
            e1000e_lower_interrupts(core, ICR, 0xffffffff);
            trace_e1000e_irq_icr_process_iame();
            e1000e_lower_interrupts(core, IMS, core->mac[IAM]);
        }

        /*
         * The datasheet does not say what happens when an interrupt was
         * asserted (ICR.INT_ASSERT=1) and auto mask is *not* active.
         * However, section 13.3.27 of the PCIe* GbE Controllers Open Source
         * Software Developer's Manual, which was written for older devices,
         * namely 631xESB/632xESB, 82563EB/82564EB, 82571EB/82572EI &
         * 82573E/82573V/82573L, does say:
         * > If IMS = 0b, then the ICR register is always clear-on-read. If IMS
         * > is not 0b, but some ICR bit is set where the corresponding IMS bit
         * > is not set, then a read does not clear the ICR register. For
         * > example, if IMS = 10101010b and ICR = 01010101b, then a read to the
         * > ICR register does not clear it. If IMS = 10101010b and
         * > ICR = 0101011b, then a read to the ICR register clears it entirely
         * > (ICR.INT_ASSERTED = 1b).
         *
         * Linux no longer activates auto mask since commit
         * 0a8047ac68e50e4ccbadcfc6b6b070805b976885, and real hardware
         * clears ICR even in such a case, so we should do so as well.
         */
        if (core->mac[ICR] & core->mac[IMS]) {
            trace_e1000e_irq_icr_clear_icr_bit_ims(core->mac[ICR],
                                                   core->mac[IMS]);
            e1000e_lower_interrupts(core, ICR, 0xffffffff);
        }
    }

    return ret;
}
static uint32_t
e1000e_mac_read_clr4(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    return ret;
}

static uint32_t
e1000e_mac_read_clr8(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    core->mac[index - 1] = 0;
    return ret;
}
static uint32_t
e1000e_get_ctrl(E1000ECore *core, int index)
{
    uint32_t val = core->mac[CTRL];

    trace_e1000e_link_read_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    return val;
}
static uint32_t
e1000e_get_status(E1000ECore *core, int index)
{
    uint32_t res = core->mac[STATUS];

    if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
        res |= E1000_STATUS_GIO_MASTER_ENABLE;
    }

    if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
        res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
    } else {
        res |= E1000_STATUS_FD;
    }

    if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
        switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
        case E1000_CTRL_SPD_10:
            res |= E1000_STATUS_SPEED_10;
            break;
        case E1000_CTRL_SPD_100:
            res |= E1000_STATUS_SPEED_100;
            break;
        case E1000_CTRL_SPD_1000:
        default:
            res |= E1000_STATUS_SPEED_1000;
            break;
        }
    } else {
        res |= E1000_STATUS_SPEED_1000;
    }

    trace_e1000e_link_status(
        !!(res & E1000_STATUS_LU),
        !!(res & E1000_STATUS_FD),
        (res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT,
        (res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT);

    return res;
}
static uint32_t
e1000e_get_tarc(E1000ECore *core, int index)
{
    return core->mac[index] & ((BIT(11) - 1) |
                                BIT(27)      |
                                BIT(28)      |
                                BIT(29)      |
                                BIT(30));
}
static void
e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val;
}

static void
e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t macaddr[2];

    core->mac[index] = val;

    macaddr[0] = cpu_to_le32(core->mac[RA]);
    macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
    qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
                             (uint8_t *) macaddr);

    trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}
static void
e1000e_set_eecd(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ro_bits = E1000_EECD_PRES          |
                                    E1000_EECD_AUTO_RD       |
                                    E1000_EECD_SIZE_EX_MASK;

    core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}
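
/*
 * EERD/EEWR emulate one EEPROM word transfer per doorbell write:
 * decode the address and START bit, move the word to or from
 * core->eeprom[], then latch the result with E1000_EERW_DONE.
 */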
static void
e1000e_set_eerd(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t flags = 0;
    uint32_t data = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        data = core->eeprom[addr];
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}

static void
e1000e_set_eewr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK;
    uint32_t flags = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        core->eeprom[addr] = data;
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}
static void
e1000e_set_rxdctl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXDCTL] = core->mac[RXDCTL1] = val;
}
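
/*
 * Interrupt throttling: the raw guest-written ITR/EITR value is kept
 * for readback, while the operational copy in core->mac[] is clamped
 * to E1000E_MIN_XITR to bound the interrupt rate.
 */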
static void
e1000e_set_itr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;

    trace_e1000e_irq_itr_set(val);

    core->itr_guest_value = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}

static void
e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;
    uint32_t eitr_num = index - EITR;

    trace_e1000e_irq_eitr_set(eitr_num, val);

    core->eitr_guest_value[eitr_num] = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}
static void
e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val)
{
    if (core->mac[RCTL] & E1000_RCTL_DTYP_MASK) {

        if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "e1000e: PSRCTL.BSIZE0 cannot be zero");
            return;
        }

        if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "e1000e: PSRCTL.BSIZE1 cannot be zero");
            return;
        }
    }

    core->mac[PSRCTL] = val;
}
static void
e1000e_update_rx_offloads(E1000ECore *core)
{
    int cso_state = e1000e_rx_l4_cso_enabled(core);

    trace_e1000e_rx_set_cso(cso_state);

    if (core->has_vnet) {
        qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
                         cso_state, 0, 0, 0, 0);
    }
}

static void
e1000e_set_rxcsum(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXCSUM] = val;
    e1000e_update_rx_offloads(core);
}
static void
e1000e_set_gcr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
    core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
}
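
/*
 * IEEE 1588 (PTP) helpers: a SYSTIML read latches the running clock
 * into SYSTIML/SYSTIMH, and reading the high half of an RX/TX
 * timestamp re-arms capture by clearing the corresponding VALID bit.
 */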
static uint32_t e1000e_get_systiml(E1000ECore *core, int index)
{
    e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
    return core->mac[SYSTIML];
}

static uint32_t e1000e_get_rxsatrh(E1000ECore *core, int index)
{
    core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
    return core->mac[RXSATRH];
}

static uint32_t e1000e_get_txstmph(E1000ECore *core, int index)
{
    core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
    return core->mac[TXSTMPH];
}

static void e1000e_set_timinca(E1000ECore *core, int index, uint32_t val)
{
    e1000x_set_timinca(core->mac, &core->timadj, val);
}

static void e1000e_set_timadjh(E1000ECore *core, int index, uint32_t val)
{
    core->mac[TIMADJH] = val;
    core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
}
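
/*
 * Dispatch table for MMIO register reads, indexed by register number.
 * Plain registers go through e1000e_mac_readreg(); read-to-clear
 * statistics counters and registers with read side effects get
 * dedicated handlers.
 */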
#define e1000e_getreg(x)    [x] = e1000e_mac_readreg
typedef uint32_t (*readops)(E1000ECore *, int);
static const readops e1000e_macreg_readops[] = {
    e1000e_getreg(WUFC),
    e1000e_getreg(MANC),
    e1000e_getreg(TOTL),
    e1000e_getreg(RDT0),
    e1000e_getreg(RDBAH0),
    e1000e_getreg(TDBAL1),
    e1000e_getreg(RDLEN0),
    e1000e_getreg(RDH1),
    e1000e_getreg(LATECOL),
    e1000e_getreg(SEQEC),
    e1000e_getreg(XONTXC),
    e1000e_getreg(TDFH),
    e1000e_getreg(TDFT),
    e1000e_getreg(TDFHS),
    e1000e_getreg(TDFTS),
    e1000e_getreg(TDFPC),
    e1000e_getreg(RDFH),
    e1000e_getreg(RDFT),
    e1000e_getreg(RDFHS),
    e1000e_getreg(RDFTS),
    e1000e_getreg(RDFPC),
    e1000e_getreg(GORCL),
    e1000e_getreg(MGTPRC),
    e1000e_getreg(EERD),
    e1000e_getreg(EIAC),
    e1000e_getreg(PSRCTL),
    e1000e_getreg(MANC2H),
    e1000e_getreg(RXCSUM),
    e1000e_getreg(GSCL_3),
    e1000e_getreg(GSCN_2),
    e1000e_getreg(RSRPD),
    e1000e_getreg(RDBAL1),
    e1000e_getreg(FCAH),
    e1000e_getreg(FCRTH),
    e1000e_getreg(FLOP),
    e1000e_getreg(FLASHT),
    e1000e_getreg(RXSTMPH),
    e1000e_getreg(TXSTMPL),
    e1000e_getreg(TIMADJL),
    e1000e_getreg(TXDCTL),
    e1000e_getreg(RDH0),
    e1000e_getreg(TDT1),
    e1000e_getreg(TNCRS),
    e1000e_getreg(GSCL_2),
    e1000e_getreg(RDBAH1),
    e1000e_getreg(FLSWDATA),
    e1000e_getreg(TIPG),
    e1000e_getreg(FLMNGCTL),
    e1000e_getreg(FLMNGCNT),
    e1000e_getreg(TSYNCTXCTL),
    e1000e_getreg(EXTCNF_SIZE),
    e1000e_getreg(EXTCNF_CTRL),
    e1000e_getreg(EEMNGDATA),
    e1000e_getreg(CTRL_EXT),
    e1000e_getreg(SYSTIMH),
    e1000e_getreg(EEMNGCTL),
    e1000e_getreg(FLMNGDATA),
    e1000e_getreg(TSYNCRXCTL),
    e1000e_getreg(LEDCTL),
    e1000e_getreg(TCTL),
    e1000e_getreg(TDBAL),
    e1000e_getreg(TDLEN),
    e1000e_getreg(TDH1),
    e1000e_getreg(RADV),
    e1000e_getreg(ECOL),
    e1000e_getreg(RLEC),
    e1000e_getreg(XOFFTXC),
    e1000e_getreg(RNBC),
    e1000e_getreg(MGTPTC),
    e1000e_getreg(TIMINCA),
    e1000e_getreg(RXCFGL),
    e1000e_getreg(MFUTP01),
    e1000e_getreg(FACTPS),
    e1000e_getreg(GSCL_1),
    e1000e_getreg(GSCN_0),
    e1000e_getreg(GCR2),
    e1000e_getreg(RDT1),
    e1000e_getreg(PBACLR),
    e1000e_getreg(FCTTV),
    e1000e_getreg(EEWR),
    e1000e_getreg(FLSWCTL),
    e1000e_getreg(RXDCTL1),
    e1000e_getreg(RXSATRL),
    e1000e_getreg(RXUDP),
    e1000e_getreg(TORL),
    e1000e_getreg(TDLEN1),
    e1000e_getreg(EECD),
    e1000e_getreg(MFUTP23),
    e1000e_getreg(RAID),
    e1000e_getreg(FCRTV),
    e1000e_getreg(TXDCTL1),
    e1000e_getreg(RCTL),
    e1000e_getreg(MDIC),
    e1000e_getreg(FCRUC),
    e1000e_getreg(RDBAL0),
    e1000e_getreg(TDBAH1),
    e1000e_getreg(RDTR),
    e1000e_getreg(COLC),
    e1000e_getreg(CEXTERR),
    e1000e_getreg(XOFFRXC),
    e1000e_getreg(IPAV),
    e1000e_getreg(GOTCL),
    e1000e_getreg(MGTPDC),
    e1000e_getreg(IVAR),
    e1000e_getreg(POEMB),
    e1000e_getreg(MFVAL),
    e1000e_getreg(FUNCTAG),
    e1000e_getreg(GSCL_4),
    e1000e_getreg(GSCN_3),
    e1000e_getreg(MRQC),
    e1000e_getreg(RDLEN1),
    e1000e_getreg(FLOL),
    e1000e_getreg(RXDCTL),
    e1000e_getreg(RXSTMPL),
    e1000e_getreg(TIMADJH),
    e1000e_getreg(FCRTL),
    e1000e_getreg(TDBAH),
    e1000e_getreg(TADV),
    e1000e_getreg(XONRXC),
    e1000e_getreg(TSCTFC),
    e1000e_getreg(RFCTL),
    e1000e_getreg(GSCN_1),
    e1000e_getreg(FCAL),
    e1000e_getreg(FLSWCNT),

    [TOTH]    = e1000e_mac_read_clr8,
    [GOTCH]   = e1000e_mac_read_clr8,
    [PRC64]   = e1000e_mac_read_clr4,
    [PRC255]  = e1000e_mac_read_clr4,
    [PRC1023] = e1000e_mac_read_clr4,
    [PTC64]   = e1000e_mac_read_clr4,
    [PTC255]  = e1000e_mac_read_clr4,
    [PTC1023] = e1000e_mac_read_clr4,
    [GPRC]    = e1000e_mac_read_clr4,
    [TPT]     = e1000e_mac_read_clr4,
    [RUC]     = e1000e_mac_read_clr4,
    [BPRC]    = e1000e_mac_read_clr4,
    [MPTC]    = e1000e_mac_read_clr4,
    [IAC]     = e1000e_mac_read_clr4,
    [ICR]     = e1000e_mac_icr_read,
    [STATUS]  = e1000e_get_status,
    [TARC0]   = e1000e_get_tarc,
    [ICS]     = e1000e_mac_ics_read,
    [TORH]    = e1000e_mac_read_clr8,
    [GORCH]   = e1000e_mac_read_clr8,
    [PRC127]  = e1000e_mac_read_clr4,
    [PRC511]  = e1000e_mac_read_clr4,
    [PRC1522] = e1000e_mac_read_clr4,
    [PTC127]  = e1000e_mac_read_clr4,
    [PTC511]  = e1000e_mac_read_clr4,
    [PTC1522] = e1000e_mac_read_clr4,
    [GPTC]    = e1000e_mac_read_clr4,
    [TPR]     = e1000e_mac_read_clr4,
    [ROC]     = e1000e_mac_read_clr4,
    [MPRC]    = e1000e_mac_read_clr4,
    [BPTC]    = e1000e_mac_read_clr4,
    [TSCTC]   = e1000e_mac_read_clr4,
    [ITR]     = e1000e_mac_itr_read,
    [CTRL]    = e1000e_get_ctrl,
    [TARC1]   = e1000e_get_tarc,
    [SWSM]    = e1000e_mac_swsm_read,
    [IMS]     = e1000e_mac_ims_read,
    [SYSTIML] = e1000e_get_systiml,
    [RXSATRH] = e1000e_get_rxsatrh,
    [TXSTMPH] = e1000e_get_txstmph,

    [CRCERRS ... MPC]      = e1000e_mac_readreg,
    [IP6AT ... IP6AT + 3]  = e1000e_mac_readreg,
    [IP4AT ... IP4AT + 6]  = e1000e_mac_readreg,
    [RA ... RA + 31]       = e1000e_mac_readreg,
    [WUPM ... WUPM + 31]   = e1000e_mac_readreg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1]            = e1000e_mac_readreg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_readreg,
    [FFMT ... FFMT + 254]  = e1000e_mac_readreg,
    [FFVT ... FFVT + 254]  = e1000e_mac_readreg,
    [MDEF ... MDEF + 7]    = e1000e_mac_readreg,
    [FFLT ... FFLT + 10]   = e1000e_mac_readreg,
    [FTFT ... FTFT + 254]  = e1000e_mac_readreg,
    [PBM ... PBM + 10239]  = e1000e_mac_readreg,
    [RETA ... RETA + 31]   = e1000e_mac_readreg,
    [RSSRK ... RSSRK + 31] = e1000e_mac_readreg,
    [MAVTV0 ... MAVTV3]    = e1000e_mac_readreg,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_mac_eitr_read
};
enum { E1000E_NREADOPS = ARRAY_SIZE(e1000e_macreg_readops) };
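
/*
 * Dispatch table for MMIO register writes. Registers without special
 * semantics go through e1000e_mac_writereg(); the rest get dedicated
 * setters (width-limited fields, doorbells, interrupt registers, ...).
 */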
#define e1000e_putreg(x)    [x] = e1000e_mac_writereg
typedef void (*writeops)(E1000ECore *, int, uint32_t);
static const writeops e1000e_macreg_writeops[] = {
    e1000e_putreg(SWSM),
    e1000e_putreg(WUFC),
    e1000e_putreg(RDBAH1),
    e1000e_putreg(TDBAH),
    e1000e_putreg(TXDCTL),
    e1000e_putreg(RDBAH0),
    e1000e_putreg(LEDCTL),
    e1000e_putreg(FCAL),
    e1000e_putreg(FCRUC),
    e1000e_putreg(IPAV),
    e1000e_putreg(TDBAH1),
    e1000e_putreg(EIAC),
    e1000e_putreg(IVAR),
    e1000e_putreg(TARC0),
    e1000e_putreg(TARC1),
    e1000e_putreg(FLSWDATA),
    e1000e_putreg(POEMB),
    e1000e_putreg(MFUTP01),
    e1000e_putreg(MFUTP23),
    e1000e_putreg(MANC),
    e1000e_putreg(MANC2H),
    e1000e_putreg(MFVAL),
    e1000e_putreg(EXTCNF_CTRL),
    e1000e_putreg(FACTPS),
    e1000e_putreg(FUNCTAG),
    e1000e_putreg(GSCL_1),
    e1000e_putreg(GSCL_2),
    e1000e_putreg(GSCL_3),
    e1000e_putreg(GSCL_4),
    e1000e_putreg(GSCN_0),
    e1000e_putreg(GSCN_1),
    e1000e_putreg(GSCN_2),
    e1000e_putreg(GSCN_3),
    e1000e_putreg(GCR2),
    e1000e_putreg(MRQC),
    e1000e_putreg(FLOP),
    e1000e_putreg(FLOL),
    e1000e_putreg(FLSWCTL),
    e1000e_putreg(FLSWCNT),
    e1000e_putreg(RXDCTL1),
    e1000e_putreg(TXDCTL1),
    e1000e_putreg(TIPG),
    e1000e_putreg(RXSTMPH),
    e1000e_putreg(RXSTMPL),
    e1000e_putreg(RXSATRL),
    e1000e_putreg(RXSATRH),
    e1000e_putreg(TXSTMPL),
    e1000e_putreg(TXSTMPH),
    e1000e_putreg(SYSTIML),
    e1000e_putreg(SYSTIMH),
    e1000e_putreg(TIMADJL),
    e1000e_putreg(RXUDP),
    e1000e_putreg(RXCFGL),
    e1000e_putreg(TSYNCRXCTL),
    e1000e_putreg(TSYNCTXCTL),
    e1000e_putreg(EXTCNF_SIZE),
    e1000e_putreg(EEMNGCTL),

    [TDH1]     = e1000e_set_16bit,
    [TDT1]     = e1000e_set_tdt,
    [TCTL]     = e1000e_set_tctl,
    [TDT]      = e1000e_set_tdt,
    [MDIC]     = e1000e_set_mdic,
    [ICS]      = e1000e_set_ics,
    [TDH]      = e1000e_set_16bit,
    [RDH0]     = e1000e_set_16bit,
    [RDT0]     = e1000e_set_rdt,
    [IMC]      = e1000e_set_imc,
    [IMS]      = e1000e_set_ims,
    [ICR]      = e1000e_set_icr,
    [EECD]     = e1000e_set_eecd,
    [RCTL]     = e1000e_set_rx_control,
    [CTRL]     = e1000e_set_ctrl,
    [RDTR]     = e1000e_set_rdtr,
    [RADV]     = e1000e_set_16bit,
    [TADV]     = e1000e_set_16bit,
    [ITR]      = e1000e_set_itr,
    [EERD]     = e1000e_set_eerd,
    [AIT]      = e1000e_set_16bit,
    [TDFH]     = e1000e_set_13bit,
    [TDFT]     = e1000e_set_13bit,
    [TDFHS]    = e1000e_set_13bit,
    [TDFTS]    = e1000e_set_13bit,
    [TDFPC]    = e1000e_set_13bit,
    [RDFH]     = e1000e_set_13bit,
    [RDFHS]    = e1000e_set_13bit,
    [RDFT]     = e1000e_set_13bit,
    [RDFTS]    = e1000e_set_13bit,
    [RDFPC]    = e1000e_set_13bit,
    [PBS]      = e1000e_set_6bit,
    [GCR]      = e1000e_set_gcr,
    [PSRCTL]   = e1000e_set_psrctl,
    [RXCSUM]   = e1000e_set_rxcsum,
    [RAID]     = e1000e_set_16bit,
    [RSRPD]    = e1000e_set_12bit,
    [TIDV]     = e1000e_set_tidv,
    [TDLEN1]   = e1000e_set_dlen,
    [TDLEN]    = e1000e_set_dlen,
    [RDLEN0]   = e1000e_set_dlen,
    [RDLEN1]   = e1000e_set_dlen,
    [TDBAL]    = e1000e_set_dbal,
    [TDBAL1]   = e1000e_set_dbal,
    [RDBAL0]   = e1000e_set_dbal,
    [RDBAL1]   = e1000e_set_dbal,
    [RDH1]     = e1000e_set_16bit,
    [RDT1]     = e1000e_set_rdt,
    [STATUS]   = e1000e_set_status,
    [PBACLR]   = e1000e_set_pbaclr,
    [CTRL_EXT] = e1000e_set_ctrlext,
    [FCAH]     = e1000e_set_16bit,
    [FCT]      = e1000e_set_16bit,
    [FCTTV]    = e1000e_set_16bit,
    [FCRTV]    = e1000e_set_16bit,
    [FCRTH]    = e1000e_set_fcrth,
    [FCRTL]    = e1000e_set_fcrtl,
    [VET]      = e1000e_set_vet,
    [RXDCTL]   = e1000e_set_rxdctl,
    [FLASHT]   = e1000e_set_16bit,
    [EEWR]     = e1000e_set_eewr,
    [CTRL_DUP] = e1000e_set_ctrl,
    [RFCTL]    = e1000e_set_rfctl,
    [RA + 1]   = e1000e_mac_setmacaddr,
    [TIMINCA]  = e1000e_set_timinca,
    [TIMADJH]  = e1000e_set_timadjh,

    [IP6AT ... IP6AT + 3]    = e1000e_mac_writereg,
    [IP4AT ... IP4AT + 6]    = e1000e_mac_writereg,
    [RA + 2 ... RA + 31]     = e1000e_mac_writereg,
    [WUPM ... WUPM + 31]     = e1000e_mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1]            = e1000e_mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_writereg,
    [FFMT ... FFMT + 254]    = e1000e_set_4bit,
    [FFVT ... FFVT + 254]    = e1000e_mac_writereg,
    [PBM ... PBM + 10239]    = e1000e_mac_writereg,
    [MDEF ... MDEF + 7]      = e1000e_mac_writereg,
    [FFLT ... FFLT + 10]     = e1000e_set_11bit,
    [FTFT ... FTFT + 254]    = e1000e_mac_writereg,
    [RETA ... RETA + 31]     = e1000e_mac_writereg,
    [RSSRK ... RSSRK + 31]   = e1000e_mac_writereg,
    [MAVTV0 ... MAVTV3]      = e1000e_mac_writereg,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_set_eitr
};
enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) };
enum { MAC_ACCESS_PARTIAL = 1 };

/*
 * The array below combines alias offsets of the index values for the
 * MAC registers that have aliases, with the indication of not fully
 * implemented registers (lowest bit). This combination is possible
 * because all of the offsets are even.
 */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802,
    [RDH0_A]  = 0x09bc, [RDT0_A]  = 0x09bc, [RDTR_A] = 0x09c6,
    [RDFH_A]  = 0xe904, [RDFT_A]  = 0xe904,
    [TDH_A]   = 0x0cf8, [TDT_A]   = 0x0cf8, [TIDV_A] = 0x0cf8,
    [TDFH_A]  = 0xed00, [TDFT_A]  = 0xed00,
    [RA_A ... RA_A + 31]                             = 0x14f0,
    [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,
    [RDBAL0_A ... RDLEN0_A] = 0x09bc,
    [TDBAL_A ... TDLEN_A]   = 0x0cf8,
    /* Access options */
    [RDFH]  = MAC_ACCESS_PARTIAL,    [RDFT]   = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL,    [RDFTS]  = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH]  = MAC_ACCESS_PARTIAL,    [TDFT]   = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL,    [TDFTS]  = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL,    [EECD]   = MAC_ACCESS_PARTIAL,
    [PBM]   = MAC_ACCESS_PARTIAL,    [FLA]    = MAC_ACCESS_PARTIAL,
    [FCAL]  = MAC_ACCESS_PARTIAL,    [FCAH]   = MAC_ACCESS_PARTIAL,
    [FCT]   = MAC_ACCESS_PARTIAL,    [FCTTV]  = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL,    [FCRTL]  = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL,    [TXDCTL] = MAC_ACCESS_PARTIAL,
    [TXDCTL1] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};
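
/*
 * MMIO entry points: resolve aliases through mac_reg_access, then
 * dispatch via the tables above. Accesses to unknown registers are
 * traced and otherwise ignored; unknown reads return zero.
 */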
void
e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NWRITEOPS && e1000e_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        e1000e_macreg_writeops[index](core, index, val);
    } else if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}
uint64_t
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = e1000e_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }

    return 0;
}
static void
e1000e_autoneg_pause(E1000ECore *core)
{
    timer_del(core->autoneg_timer);
}

static void
e1000e_autoneg_resume(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}
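
/*
 * VM run-state hook: the interrupt-delay timers and the
 * auto-negotiation timer must not fire while the machine is stopped,
 * so they are paused and resumed together with the VM.
 */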
static void
e1000e_vm_state_change(void *opaque, bool running, RunState state)
{
    E1000ECore *core = opaque;

    if (running) {
        trace_e1000e_vm_state_running();
        e1000e_intrmgr_resume(core);
        e1000e_autoneg_resume(core);
    } else {
        trace_e1000e_vm_state_stopped();
        e1000e_autoneg_pause(core);
        e1000e_intrmgr_pause(core);
    }
}
void
e1000e_core_pci_realize(E1000ECore *core,
                        const uint16_t *eeprom_templ,
                        uint32_t eeprom_size,
                        const uint8_t *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       e1000e_autoneg_timer, core);
    e1000e_intrmgr_pci_realize(core);

    core->vmstate =
        qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
    }

    net_rx_pkt_init(&core->rx_pkt);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    e1000e_update_rx_offloads(core);
}

void
e1000e_core_pci_uninit(E1000ECore *core)
{
    int i;

    timer_free(core->autoneg_timer);

    e1000e_intrmgr_pci_unint(core);

    qemu_del_vm_change_state_handler(core->vmstate);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}
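
/*
 * PHY reset defaults: an 82574-compatible copper PHY advertising all
 * 10/100/1000 half- and full-duplex modes with auto-negotiation
 * enabled and the link reported up.
 */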
static const uint16_t
e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = {
    [0] = {
        [MII_BMCR] = MII_BMCR_SPEED1000 |
                     MII_BMCR_FD        |
                     MII_BMCR_AUTOEN,

        [MII_BMSR] = MII_BMSR_EXTCAP    |
                     MII_BMSR_LINK_ST   |
                     MII_BMSR_AUTONEG   |
                     MII_BMSR_10T_HD    |
                     MII_BMSR_10T_FD    |
                     MII_BMSR_100TX_HD  |
                     MII_BMSR_100TX_FD  |
                     MII_BMSR_EXTSTAT,

        [MII_PHYID1]            = 0x141,
        [MII_PHYID2]            = E1000_PHY_ID2_82574x,
        [MII_ANAR]              = MII_ANAR_CSMACD | MII_ANAR_10 |
                                  MII_ANAR_10FD | MII_ANAR_TX |
                                  MII_ANAR_TXFD | MII_ANAR_PAUSE |
                                  MII_ANAR_PAUSE_ASYM,
        [MII_ANLPAR]            = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                                  MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                                  MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
        [MII_ANER]              = MII_ANER_NP | MII_ANER_NWAY,
        [MII_ANNP]              = 1 | MII_ANNP_MP,
        [MII_CTRL1000]          = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                                  MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
        [MII_STAT1000]          = MII_STAT1000_HALF | MII_STAT1000_FULL |
                                  MII_STAT1000_ROK | MII_STAT1000_LOK,
        [MII_EXTSTAT]           = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

        [PHY_COPPER_CTRL1]      = BIT(5) | BIT(6) | BIT(8) | BIT(9) |
                                  BIT(12) | BIT(13),
        [PHY_COPPER_STAT1]      = BIT(3) | BIT(10) | BIT(11) | BIT(13) | BIT(15)
    },
    [2] = {
        [PHY_MAC_CTRL1]         = BIT(3) | BIT(7),
        [PHY_MAC_CTRL2]         = BIT(1) | BIT(2) | BIT(6) | BIT(12)
    },
    [3] = {
        [PHY_LED_TIMER_CTRL]    = BIT(0) | BIT(2) | BIT(14)
    }
};
static const uint32_t e1000e_mac_reg_init[] = {
    [LEDCTL]        = BIT(1) | BIT(8) | BIT(9) | BIT(15) | BIT(17) | BIT(18),
    [EXTCNF_CTRL]   = BIT(3),
    [EEMNGCTL]      = BIT(31),
    [FLSWCTL]       = BIT(30) | BIT(31),
    [RXDCTL1]       = BIT(16),
    [TIPG]          = 0x8 | (0x8 << 10) | (0x6 << 20),
    [CTRL]          = E1000_CTRL_FD | E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                      E1000_CTRL_SPD_1000 | E1000_CTRL_SLU |
                      E1000_CTRL_ADVD3WUC,
    [STATUS]        = E1000_STATUS_ASDV_1000 | E1000_STATUS_LU,
    [PSRCTL]        = (2 << E1000_PSRCTL_BSIZE0_SHIFT) |
                      (4 << E1000_PSRCTL_BSIZE1_SHIFT) |
                      (4 << E1000_PSRCTL_BSIZE2_SHIFT),
    [TARC0]         = 0x3 | E1000_TARC_ENABLE,
    [TARC1]         = 0x3 | E1000_TARC_ENABLE,
    [EECD]          = E1000_EECD_AUTO_RD | E1000_EECD_PRES,
    [EERD]          = E1000_EERW_DONE,
    [EEWR]          = E1000_EERW_DONE,
    [GCR]           = E1000_L0S_ADJUST |
                      E1000_L1_ENTRY_LATENCY_MSB |
                      E1000_L1_ENTRY_LATENCY_LSB,
    [MANC]          = E1000_MANC_DIS_IP_CHK_ARP,
    [FACTPS]        = E1000_FACTPS_LAN0_ON | 0x20000000,
    [RXCSUM]        = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [ITR]           = E1000E_MIN_XITR,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR,
};
static void e1000e_reset(E1000ECore *core, bool sw)
{
    int i;

    timer_del(core->autoneg_timer);

    e1000e_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);

    for (i = 0; i < E1000E_MAC_SIZE; i++) {
        if (sw && (i == PBA || i == PBS || i == FLA)) {
            continue;
        }

        core->mac[i] = i < ARRAY_SIZE(e1000e_mac_reg_init) ?
                       e1000e_mac_reg_init[i] : 0;
    }

    core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;

    if (qemu_get_queue(core->owner_nic)->link_down) {
        e1000e_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
        core->tx[i].skip_cp = false;
    }
}
void
e1000e_core_reset(E1000ECore *core)
{
    e1000e_reset(core, false);
}

void e1000e_core_pre_save(E1000ECore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && e1000e_have_autoneg(core)) {
        core->phy[0][MII_BMSR] |= MII_BMSR_AN_COMP;
        e1000e_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}
int
e1000e_core_post_load(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down according
     * to link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;