hw/net/e1000x_common.c

/*
 * QEMU e1000(e) emulation - shared code
 *
 * Copyright (c) 2008 Qumranet
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/net/mii.h"
#include "hw/pci/pci_device.h"
#include "net/eth.h"
#include "net/net.h"

#include "e1000_common.h"
#include "e1000x_common.h"

#include "trace.h"

bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac)
{
    bool link_up = mac[STATUS] & E1000_STATUS_LU;
    bool rx_enabled = mac[RCTL] & E1000_RCTL_EN;
    bool pci_master = d->config[PCI_COMMAND] & PCI_COMMAND_MASTER;

    if (!link_up || !rx_enabled || !pci_master) {
        trace_e1000x_rx_can_recv_disabled(link_up, rx_enabled, pci_master);
        return false;
    }

    return true;
}

bool e1000x_is_vlan_packet(const void *buf, uint16_t vet)
{
    uint16_t eth_proto = lduw_be_p(&PKT_GET_ETH_HDR(buf)->h_proto);
    bool res = (eth_proto == vet);

    trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet);

    return res;
}

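/*
 * Accept or reject a tagged frame using the VLAN Filter Table Array (VFTA):
 * when VLAN filtering is enabled, the VID taken from the tag's TCI selects
 * one bit in the table, and the frame is rejected if that bit is clear.
 */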
bool e1000x_rx_vlan_filter(uint32_t *mac, const struct vlan_header *vhdr)
{
    if (e1000x_vlan_rx_filter_enabled(mac)) {
        uint16_t vid = lduw_be_p(&vhdr->h_tci);
        uint32_t vfta =
            ldl_le_p((uint32_t *)(mac + VFTA) +
                     ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK));
        if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) {
            trace_e1000x_rx_flt_vlan_mismatch(vid);
            return false;
        }

        trace_e1000x_rx_flt_vlan_match(vid);
    }

    return true;
}

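/*
 * Destination-address filtering: broadcast frames pass when RCTL.BAM is set,
 * and the MPE/UPE bits make multicast/unicast reception promiscuous.
 * Otherwise the address must either match one of the valid Receive Address
 * (RAL/RAH) entries exactly, or hit the Multicast Table Array via a 12-bit
 * hash taken from the top bytes of the address, with RCTL.MO selecting how
 * far those bits are shifted (mta_shift below).
 */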
bool e1000x_rx_group_filter(uint32_t *mac, const struct eth_header *ehdr)
{
    static const int mta_shift[] = { 4, 3, 2, 0 };
    uint32_t f, ra[2], *rp, rctl = mac[RCTL];

    if (is_broadcast_ether_addr(ehdr->h_dest)) {
        if (rctl & E1000_RCTL_BAM) {
            return true;
        }
    } else if (is_multicast_ether_addr(ehdr->h_dest)) {
        if (rctl & E1000_RCTL_MPE) {
            return true;
        }
    } else {
        if (rctl & E1000_RCTL_UPE) {
            return true;
        }
    }

    for (rp = mac + RA; rp < mac + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV)) {
            continue;
        }
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
            trace_e1000x_rx_flt_ucast_match((int)(rp - mac - RA) / 2,
                                            MAC_ARG(ehdr->h_dest));
            return true;
        }
    }
    trace_e1000x_rx_flt_ucast_mismatch(MAC_ARG(ehdr->h_dest));

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff;
    if (mac[MTA + (f >> 5)] & (1 << (f & 0x1f))) {
        return true;
    }

    trace_e1000x_rx_flt_inexact_mismatch(MAC_ARG(ehdr->h_dest),
                                         (rctl >> E1000_RCTL_MO_SHIFT) & 3,
                                         f >> 5,
                                         mac[MTA + (f >> 5)]);

    return false;
}

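/*
 * Reception requires both link-up (STATUS.LU) and an enabled receiver
 * (RCTL.EN).
 */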
bool e1000x_hw_rx_enabled(uint32_t *mac)
{
    if (!(mac[STATUS] & E1000_STATUS_LU)) {
        trace_e1000x_rx_link_down(mac[STATUS]);
        return false;
    }

    if (!(mac[RCTL] & E1000_RCTL_EN)) {
        trace_e1000x_rx_disabled(mac[RCTL]);
        return false;
    }

    return true;
}

bool e1000x_is_oversized(uint32_t *mac, size_t size)
{
    size_t header_size = sizeof(struct eth_header) + sizeof(struct vlan_header);
    /* this is the size past which hardware will
       drop packets when setting LPE=0 */
    size_t maximum_short_size = header_size + ETH_MTU;
    /* this is the size past which hardware will
       drop packets when setting LPE=1 */
    size_t maximum_large_size = 16 * KiB - ETH_FCS_LEN;

    if ((size > maximum_large_size ||
        (size > maximum_short_size && !(mac[RCTL] & E1000_RCTL_LPE)))
        && !(mac[RCTL] & E1000_RCTL_SBP)) {
        e1000x_inc_reg_if_not_full(mac, ROC);
        trace_e1000x_rx_oversized(size);
        return true;
    }

    return false;
}

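/*
 * Drop the link and arm the caller-supplied autonegotiation timer so it
 * fires 500 ms later on the virtual clock, at which point the device model
 * can complete the (re)negotiation.
 */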
void e1000x_restart_autoneg(uint32_t *mac, uint16_t *phy, QEMUTimer *timer)
{
    e1000x_update_regs_on_link_down(mac, phy);
    trace_e1000x_link_negotiation_start();
    timer_mod(timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}

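/*
 * Program Receive Address entry 0 from the given MAC address: the first
 * four bytes go into RAL (mac_regs[RA]), the remaining two into RAH
 * together with the Address Valid bit.
 */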
void e1000x_reset_mac_addr(NICState *nic, uint32_t *mac_regs,
                           uint8_t *mac_addr)
{
    int i;

    mac_regs[RA] = 0;
    mac_regs[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        mac_regs[RA] |= mac_addr[i] << (8 * i);
        mac_regs[RA + 1] |=
            (i < 2) ? mac_addr[i + 4] << (8 * i) : 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(nic), mac_addr);
    trace_e1000x_mac_indicate(MAC_ARG(mac_addr));
}

void e1000x_update_regs_on_autoneg_done(uint32_t *mac, uint16_t *phy)
{
    e1000x_update_regs_on_link_up(mac, phy);
    phy[MII_ANLPAR] |= MII_ANLPAR_ACK;
    phy[MII_BMSR] |= MII_BMSR_AN_COMP;
    trace_e1000x_link_negotiation_done();
}

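/*
 * Build the emulated EEPROM image: copy the device template, store the MAC
 * address in words 0-2 (low byte first), write the device ID into words 11
 * and 13, and finally pick a checksum word so that the 16-bit sum of all
 * words up to and including EEPROM_CHECKSUM_REG equals EEPROM_SUM.
 */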
void
e1000x_core_prepare_eeprom(uint16_t *eeprom,
                           const uint16_t *templ,
                           uint32_t templ_size,
                           uint16_t dev_id,
                           const uint8_t *macaddr)
{
    uint16_t checksum = 0;
    int i;

    memmove(eeprom, templ, templ_size);

    for (i = 0; i < 3; i++) {
        eeprom[i] = (macaddr[2 * i + 1] << 8) | macaddr[2 * i];
    }

    eeprom[11] = eeprom[13] = dev_id;

    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
        checksum += eeprom[i];
    }

    checksum = (uint16_t) EEPROM_SUM - checksum;

    eeprom[EEPROM_CHECKSUM_REG] = checksum;
}

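/*
 * Decode the receive buffer size from RCTL: with BSEX set the extended
 * sizes 16384/8192/4096 apply, otherwise 1024/512/256; any other encoding
 * (including the all-zero SZ_2048 one) falls through to the 2048-byte
 * default.
 */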
uint32_t
e1000x_rxbufsize(uint32_t rctl)
{
    rctl &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
        E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
        E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (rctl) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

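/*
 * Update the aggregate receive statistics for one good packet: the per-size
 * PRC counters, the total/good packet counts (TPR/GPRC), the 64-bit octet
 * counters (TORL/GORCL) and the broadcast/multicast counters.
 */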
void
e1000x_update_rx_total_stats(uint32_t *mac,
                             eth_pkt_types_e pkt_type,
                             size_t pkt_size,
                             size_t pkt_fcs_size)
{
    static const int PRCregs[6] = { PRC64, PRC127, PRC255, PRC511,
                                    PRC1023, PRC1522 };

    e1000x_increase_size_stats(mac, PRCregs, pkt_fcs_size);
    e1000x_inc_reg_if_not_full(mac, TPR);
    e1000x_inc_reg_if_not_full(mac, GPRC);
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     * Always include FCS length (4) in size.
     */
    e1000x_grow_8reg_if_not_full(mac, TORL, pkt_size + 4);
    e1000x_grow_8reg_if_not_full(mac, GORCL, pkt_size + 4);

    switch (pkt_type) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(mac, BPRC);
        break;

    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(mac, MPRC);
        break;

    default:
        break;
    }
}

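/*
 * Bump the packet-size bucket counter matching "size": 64, 65-127, 128-255,
 * 256-511, 512-1023 or 1024+ bytes.  Frames shorter than 64 bytes are not
 * counted here.
 */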
void
e1000x_increase_size_stats(uint32_t *mac, const int *size_regs, int size)
{
    if (size > 1023) {
        e1000x_inc_reg_if_not_full(mac, size_regs[5]);
    } else if (size > 511) {
        e1000x_inc_reg_if_not_full(mac, size_regs[4]);
    } else if (size > 255) {
        e1000x_inc_reg_if_not_full(mac, size_regs[3]);
    } else if (size > 127) {
        e1000x_inc_reg_if_not_full(mac, size_regs[2]);
    } else if (size > 64) {
        e1000x_inc_reg_if_not_full(mac, size_regs[1]);
    } else if (size == 64) {
        e1000x_inc_reg_if_not_full(mac, size_regs[0]);
    }
}

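/*
 * Unpack a TCP/IP context descriptor into e1000x_txd_props: the IP and
 * TCP/UDP checksum offsets (ipcss/ipcso/ipcse, tucss/tucso/tucse), the TSO
 * parameters (paylen, hdr_len, mss) and the IP/TCP/TSE command bits.
 */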
void
e1000x_read_tx_ctx_descr(struct e1000_context_desc *d,
                         e1000x_txd_props *props)
{
    uint32_t op = le32_to_cpu(d->cmd_and_length);

    props->ipcss = d->lower_setup.ip_fields.ipcss;
    props->ipcso = d->lower_setup.ip_fields.ipcso;
    props->ipcse = le16_to_cpu(d->lower_setup.ip_fields.ipcse);
    props->tucss = d->upper_setup.tcp_fields.tucss;
    props->tucso = d->upper_setup.tcp_fields.tucso;
    props->tucse = le16_to_cpu(d->upper_setup.tcp_fields.tucse);
    props->paylen = op & 0xfffff;
    props->hdr_len = d->tcp_seg_setup.fields.hdr_len;
    props->mss = le16_to_cpu(d->tcp_seg_setup.fields.mss);
    props->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
    props->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
    props->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
}

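/*
 * Produce a SYSTIM-style timestamp: scale the virtual-clock nanosecond count
 * by TIMINCA's incvalue / (incperiod * 16), add the software adjustment
 * "timadj", and split the 64-bit result across the lo/hi register pair.
 */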
void e1000x_timestamp(uint32_t *mac, int64_t timadj, size_t lo, size_t hi)
{
    int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint32_t timinca = mac[TIMINCA];
    uint32_t incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
    uint32_t incperiod = MAX(timinca >> E1000_TIMINCA_INCPERIOD_SHIFT, 1);
    int64_t timestamp = timadj + muldiv64(ns, incvalue, incperiod * 16);

    mac[lo] = timestamp & 0xffffffff;
    mac[hi] = timestamp >> 32;
}

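/*
 * Latch a new TIMINCA value and fold the difference between the new and old
 * nanosecond scalings at the current virtual-clock time into *timadj, which
 * e1000x_timestamp() adds to every timestamp it produces.
 */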
void e1000x_set_timinca(uint32_t *mac, int64_t *timadj, uint32_t val)
{
    int64_t ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    uint32_t old_val = mac[TIMINCA];
    uint32_t old_incvalue = old_val & E1000_TIMINCA_INCVALUE_MASK;
    uint32_t old_incperiod = MAX(old_val >> E1000_TIMINCA_INCPERIOD_SHIFT, 1);
    uint32_t incvalue = val & E1000_TIMINCA_INCVALUE_MASK;
    uint32_t incperiod = MAX(val >> E1000_TIMINCA_INCPERIOD_SHIFT, 1);

    mac[TIMINCA] = val;
    *timadj += (muldiv64(ns, incvalue, incperiod) -
                muldiv64(ns, old_incvalue, old_incperiod)) / 16;
}