1 /*
2 * QEMU e1000 emulation
4 * Software developer's manual:
5 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
7 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8 * Copyright (c) 2008 Qumranet
9 * Based on work done by:
10 * Copyright (c) 2007 Dan Aloni
11 * Copyright (c) 2004 Antony T Curtis
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2 of the License, or (at your option) any later version.
18 * This library is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
28 #include "qemu/osdep.h"
29 #include "hw/hw.h"
30 #include "hw/pci/pci.h"
31 #include "net/net.h"
32 #include "net/checksum.h"
33 #include "hw/loader.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/dma.h"
36 #include "qemu/iov.h"
37 #include "qemu/range.h"
39 #include "e1000x_common.h"
41 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
43 #define E1000_DEBUG
45 #ifdef E1000_DEBUG
46 enum {
47 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
48 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
49 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
50 DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
52 #define DBGBIT(x) (1<<DEBUG_##x)
53 static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
55 #define DBGOUT(what, fmt, ...) do { \
56 if (debugflags & DBGBIT(what)) \
57 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
58 } while (0)
59 #else
60 #define DBGOUT(what, fmt, ...) do {} while (0)
61 #endif
63 #define IOPORT_SIZE 0x40
64 #define PNPMMIO_SIZE 0x20000
65 #define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */
67 #define MAXIMUM_ETHERNET_HDR_LEN (14+4)
70 * HW models:
71 * E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
72 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
73 * E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
74 * Others never tested
77 typedef struct E1000State_st {
78 /*< private >*/
79 PCIDevice parent_obj;
80 /*< public >*/
82 NICState *nic;
83 NICConf conf;
84 MemoryRegion mmio;
85 MemoryRegion io;
87 uint32_t mac_reg[0x8000];
88 uint16_t phy_reg[0x20];
89 uint16_t eeprom_data[64];
91 uint32_t rxbuf_size;
92 uint32_t rxbuf_min_shift;
93 struct e1000_tx {
94 unsigned char header[256];
95 unsigned char vlan_header[4];
96 /* Fields vlan and data must not be reordered or separated. */
97 unsigned char vlan[4];
98 unsigned char data[0x10000];
99 uint16_t size;
100 unsigned char vlan_needed;
101 e1000x_txd_props props;
102 uint16_t tso_frames;
103 } tx;
105 struct {
106 uint32_t val_in; /* shifted in from guest driver */
107 uint16_t bitnum_in;
108 uint16_t bitnum_out;
109 uint16_t reading;
110 uint32_t old_eecd;
111 } eecd_state;
113 QEMUTimer *autoneg_timer;
115 QEMUTimer *mit_timer; /* Mitigation timer. */
116 bool mit_timer_on; /* Mitigation timer is running. */
117 bool mit_irq_level; /* Tracks interrupt pin level. */
118 uint32_t mit_ide; /* Tracks E1000_TXD_CMD_IDE bit. */
120 /* Compatibility flags for migration to/from qemu 1.3.0 and older */
121 #define E1000_FLAG_AUTONEG_BIT 0
122 #define E1000_FLAG_MIT_BIT 1
123 #define E1000_FLAG_MAC_BIT 2
124 #define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
125 #define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
126 #define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
127 uint32_t compat_flags;
128 } E1000State;
130 #define chkflag(x) (s->compat_flags & E1000_FLAG_##x)
132 typedef struct E1000BaseClass {
133 PCIDeviceClass parent_class;
134 uint16_t phy_id2;
135 } E1000BaseClass;
137 #define TYPE_E1000_BASE "e1000-base"
139 #define E1000(obj) \
140 OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)
142 #define E1000_DEVICE_CLASS(klass) \
143 OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
144 #define E1000_DEVICE_GET_CLASS(obj) \
145 OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
147 static void
148 e1000_link_up(E1000State *s)
150 e1000x_update_regs_on_link_up(s->mac_reg, s->phy_reg);
152 /* E1000_STATUS_LU is tested by e1000_can_receive() */
153 qemu_flush_queued_packets(qemu_get_queue(s->nic));
156 static void
157 e1000_autoneg_done(E1000State *s)
159 e1000x_update_regs_on_autoneg_done(s->mac_reg, s->phy_reg);
161 /* E1000_STATUS_LU is tested by e1000_can_receive() */
162 qemu_flush_queued_packets(qemu_get_queue(s->nic));
165 static bool
166 have_autoneg(E1000State *s)
168 return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
171 static void
172 set_phy_ctrl(E1000State *s, int index, uint16_t val)
174 /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
175 s->phy_reg[PHY_CTRL] = val & ~(0x3f |
176 MII_CR_RESET |
177 MII_CR_RESTART_AUTO_NEG);
180 * QEMU 1.3 does not support link auto-negotiation emulation, so if we
181 * migrate during auto negotiation, after migration the link will be
182 * down.
184 if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
185 e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
189 static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
190 [PHY_CTRL] = set_phy_ctrl,
193 enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
195 enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
196 static const char phy_regcap[0x20] = {
197 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
198 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
199 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
200 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
201 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
202 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
203 [PHY_AUTONEG_EXP] = PHY_R,
206 /* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
207 static const uint16_t phy_reg_init[] = {
208 [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
209 MII_CR_FULL_DUPLEX |
210 MII_CR_AUTO_NEG_EN,
212 [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
213 MII_SR_LINK_STATUS | /* link initially up */
214 MII_SR_AUTONEG_CAPS |
215 /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
216 MII_SR_PREAMBLE_SUPPRESS |
217 MII_SR_EXTENDED_STATUS |
218 MII_SR_10T_HD_CAPS |
219 MII_SR_10T_FD_CAPS |
220 MII_SR_100X_HD_CAPS |
221 MII_SR_100X_FD_CAPS,
223 [PHY_ID1] = 0x141,
224 /* [PHY_ID2] configured per DevId, from e1000_reset() */
225 [PHY_AUTONEG_ADV] = 0xde1,
226 [PHY_LP_ABILITY] = 0x1e0,
227 [PHY_1000T_CTRL] = 0x0e00,
228 [PHY_1000T_STATUS] = 0x3c00,
229 [M88E1000_PHY_SPEC_CTRL] = 0x360,
230 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
231 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
234 static const uint32_t mac_reg_init[] = {
235 [PBA] = 0x00100030,
236 [LEDCTL] = 0x602,
237 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
238 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
239 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
240 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
241 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
242 E1000_STATUS_LU,
243 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
244 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
245 E1000_MANC_RMCP_EN,
248 /* Helper function, *curr == 0 means the value is not set */
249 static inline void
250 mit_update_delay(uint32_t *curr, uint32_t value)
252 if (value && (*curr == 0 || value < *curr)) {
253 *curr = value;
257 static void
258 set_interrupt_cause(E1000State *s, int index, uint32_t val)
260 PCIDevice *d = PCI_DEVICE(s);
261 uint32_t pending_ints;
262 uint32_t mit_delay;
264 s->mac_reg[ICR] = val;
267 * Make sure ICR and ICS registers have the same value.
268 * The spec says that the ICS register is write-only. However in practice,
269 * on real hardware ICS is readable, and for reads it has the same value as
270 * ICR (except that ICS does not have the clear on read behaviour of ICR).
272 * The VxWorks PRO/1000 driver uses this behaviour.
274 s->mac_reg[ICS] = val;
276 pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
277 if (!s->mit_irq_level && pending_ints) {
279 * Here we detect a potential raising edge. We postpone raising the
280 * interrupt line if we are inside the mitigation delay window
281 * (s->mit_timer_on == 1).
282 * We provide a partial implementation of interrupt mitigation,
283 * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
284 * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
285 * RADV; relative timers based on TIDV and RDTR are not implemented.
287 if (s->mit_timer_on) {
288 return;
290 if (chkflag(MIT)) {
291 /* Compute the next mitigation delay according to pending
292 * interrupts and the current values of RADV (provided
293 * RDTR!=0), TADV and ITR.
294 * Then rearm the timer.
296 mit_delay = 0;
297 if (s->mit_ide &&
298 (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
299 mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
301 if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
302 mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
304 mit_update_delay(&mit_delay, s->mac_reg[ITR]);
307 * According to e1000 SPEC, the Ethernet controller guarantees
308 * a maximum observable interrupt rate of 7813 interrupts/sec.
309 * Thus if mit_delay < 500 then the delay should be set to the
310 * minimum delay possible which is 500.
312 mit_delay = (mit_delay < 500) ? 500 : mit_delay;
314 if (mit_delay) {
315 s->mit_timer_on = 1;
316 timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
317 mit_delay * 256);
319 s->mit_ide = 0;
323 s->mit_irq_level = (pending_ints != 0);
324 pci_set_irq(d, s->mit_irq_level);
327 static void
328 e1000_mit_timer(void *opaque)
330 E1000State *s = opaque;
332 s->mit_timer_on = 0;
333 /* Call set_interrupt_cause to update the irq level (if necessary). */
334 set_interrupt_cause(s, 0, s->mac_reg[ICR]);
337 static void
338 set_ics(E1000State *s, int index, uint32_t val)
340 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
341 s->mac_reg[IMS]);
342 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
345 static void
346 e1000_autoneg_timer(void *opaque)
348 E1000State *s = opaque;
349 if (!qemu_get_queue(s->nic)->link_down) {
350 e1000_autoneg_done(s);
351 set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
355 static void e1000_reset(void *opaque)
357 E1000State *d = opaque;
358 E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
359 uint8_t *macaddr = d->conf.macaddr.a;
361 timer_del(d->autoneg_timer);
362 timer_del(d->mit_timer);
363 d->mit_timer_on = 0;
364 d->mit_irq_level = 0;
365 d->mit_ide = 0;
366 memset(d->phy_reg, 0, sizeof d->phy_reg);
367 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
368 d->phy_reg[PHY_ID2] = edc->phy_id2;
369 memset(d->mac_reg, 0, sizeof d->mac_reg);
370 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
371 d->rxbuf_min_shift = 1;
372 memset(&d->tx, 0, sizeof d->tx);
374 if (qemu_get_queue(d->nic)->link_down) {
375 e1000x_update_regs_on_link_down(d->mac_reg, d->phy_reg);
378 e1000x_reset_mac_addr(d->nic, d->mac_reg, macaddr);
381 static void
382 set_ctrl(E1000State *s, int index, uint32_t val)
384 /* RST is self clearing */
385 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
388 static void
389 set_rx_control(E1000State *s, int index, uint32_t val)
391 s->mac_reg[RCTL] = val;
392 s->rxbuf_size = e1000x_rxbufsize(val);
393 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
394 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
395 s->mac_reg[RCTL]);
396 qemu_flush_queued_packets(qemu_get_queue(s->nic));
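/*
 * PHY register access through the MDI Control register.  The guest packs
 * the PHY address, register number, opcode and data into a single MDIC
 * write; only PHY address 1 is emulated, any other address sets the
 * ERROR bit.  Reads and writes are checked against phy_regcap[], READY is
 * set once the operation completes, and an MDAC interrupt is raised via
 * set_ics() if the guest asked for one with INT_EN.
 */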
399 static void
400 set_mdic(E1000State *s, int index, uint32_t val)
402 uint32_t data = val & E1000_MDIC_DATA_MASK;
403 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
405 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
406 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
407 else if (val & E1000_MDIC_OP_READ) {
408 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
409 if (!(phy_regcap[addr] & PHY_R)) {
410 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
411 val |= E1000_MDIC_ERROR;
412 } else
413 val = (val ^ data) | s->phy_reg[addr];
414 } else if (val & E1000_MDIC_OP_WRITE) {
415 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
416 if (!(phy_regcap[addr] & PHY_W)) {
417 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
418 val |= E1000_MDIC_ERROR;
419 } else {
420 if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
421 phyreg_writeops[addr](s, index, data);
422 } else {
423 s->phy_reg[addr] = data;
427 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
429 if (val & E1000_MDIC_INT_EN) {
430 set_ics(s, 0, E1000_ICR_MDAC);
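/*
 * The guest accesses the EEPROM by bit-banging a Microwire-style serial
 * protocol through EECD: SK is the clock, CS the chip select, DI the bit
 * shifted in and DO the bit shifted out.  After nine DI bits (start bit,
 * 2-bit opcode, 6-bit word address) a READ opcode selects a 16-bit word
 * of eeprom_data, which is then shifted out MSB first on the following
 * clock edges.
 */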
434 static uint32_t
435 get_eecd(E1000State *s, int index)
437 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
439 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
440 s->eecd_state.bitnum_out, s->eecd_state.reading);
441 if (!s->eecd_state.reading ||
442 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
443 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
444 ret |= E1000_EECD_DO;
445 return ret;
448 static void
449 set_eecd(E1000State *s, int index, uint32_t val)
451 uint32_t oldval = s->eecd_state.old_eecd;
453 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
454 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
455 if (!(E1000_EECD_CS & val)) { /* CS inactive; nothing to do */
456 return;
458 if (E1000_EECD_CS & (val ^ oldval)) { /* CS rise edge; reset state */
459 s->eecd_state.val_in = 0;
460 s->eecd_state.bitnum_in = 0;
461 s->eecd_state.bitnum_out = 0;
462 s->eecd_state.reading = 0;
464 if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
465 return;
467 if (!(E1000_EECD_SK & val)) { /* falling edge */
468 s->eecd_state.bitnum_out++;
469 return;
471 s->eecd_state.val_in <<= 1;
472 if (val & E1000_EECD_DI)
473 s->eecd_state.val_in |= 1;
474 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
475 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
476 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
477 EEPROM_READ_OPCODE_MICROWIRE);
479 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
480 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
481 s->eecd_state.reading);
484 static uint32_t
485 flash_eerd_read(E1000State *s, int x)
487 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
489 if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
490 return (s->mac_reg[EERD]);
492 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
493 return (E1000_EEPROM_RW_REG_DONE | r);
495 return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
496 E1000_EEPROM_RW_REG_DONE | r);
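/*
 * Insert a 16-bit Internet (ones' complement) checksum into the frame:
 * sum the bytes from offset css up to the end of the frame (or up to cse,
 * when non-zero) and store the result big-endian at offset sloc.
 */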
499 static void
500 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
502 uint32_t sum;
504 if (cse && cse < n)
505 n = cse + 1;
506 if (sloc < n-1) {
507 sum = net_checksum_add(n-css, data+css);
508 stw_be_p(data + sloc, net_checksum_finish(sum));
512 static inline void
513 inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
515 if (!memcmp(arr, bcast, sizeof bcast)) {
516 e1000x_inc_reg_if_not_full(s->mac_reg, BPTC);
517 } else if (arr[0] & 1) {
518 e1000x_inc_reg_if_not_full(s->mac_reg, MPTC);
522 static void
523 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
525 static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
526 PTC1023, PTC1522 };
528 NetClientState *nc = qemu_get_queue(s->nic);
529 if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
530 nc->info->receive(nc, buf, size);
531 } else {
532 qemu_send_packet(nc, buf, size);
534 inc_tx_bcast_or_mcast_count(s, buf);
535 e1000x_increase_size_stats(s->mac_reg, PTCregs, size);
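/*
 * Emit one segment of the frame currently accumulated in s->tx.  For TSO
 * frames the per-segment fields are patched first: the IPv4 total length
 * and identification (or the IPv6 payload length), the TCP sequence
 * number advanced by frames * MSS, PSH/FIN cleared on all but the final
 * segment, and the payload length folded into the TCP/UDP pseudo-header
 * checksum.  The requested L3/L4 checksums are then inserted with
 * putsum() before the segment is handed to e1000_send_packet().
 */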
538 static void
539 xmit_seg(E1000State *s)
541 uint16_t len, *sp;
542 unsigned int frames = s->tx.tso_frames, css, sofar;
543 struct e1000_tx *tp = &s->tx;
545 if (tp->props.tse && tp->props.cptse) {
546 css = tp->props.ipcss;
547 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
548 frames, tp->size, css);
549 if (tp->props.ip) { /* IPv4 */
550 stw_be_p(tp->data+css+2, tp->size - css);
551 stw_be_p(tp->data+css+4,
552 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
553 } else { /* IPv6 */
554 stw_be_p(tp->data+css+4, tp->size - css);
556 css = tp->props.tucss;
557 len = tp->size - css;
558 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->props.tcp, css, len);
559 if (tp->props.tcp) {
560 sofar = frames * tp->props.mss;
561 stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
562 if (tp->props.paylen - sofar > tp->props.mss) {
563 tp->data[css + 13] &= ~9; /* PSH, FIN */
564 } else if (frames) {
565 e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC);
567 } else /* UDP */
568 stw_be_p(tp->data+css+4, len);
569 if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) {
570 unsigned int phsum;
571 // add pseudo-header length before checksum calculation
572 sp = (uint16_t *)(tp->data + tp->props.tucso);
573 phsum = be16_to_cpup(sp) + len;
574 phsum = (phsum >> 16) + (phsum & 0xffff);
575 stw_be_p(sp, phsum);
577 tp->tso_frames++;
580 if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) {
581 putsum(tp->data, tp->size, tp->props.tucso,
582 tp->props.tucss, tp->props.tucse);
584 if (tp->props.sum_needed & E1000_TXD_POPTS_IXSM) {
585 putsum(tp->data, tp->size, tp->props.ipcso,
586 tp->props.ipcss, tp->props.ipcse);
588 if (tp->vlan_needed) {
589 memmove(tp->vlan, tp->data, 4);
590 memmove(tp->data, tp->data + 4, 8);
591 memcpy(tp->data + 8, tp->vlan_header, 4);
592 e1000_send_packet(s, tp->vlan, tp->size + 4);
593 } else {
594 e1000_send_packet(s, tp->data, tp->size);
597 e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
598 e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size);
599 s->mac_reg[GPTC] = s->mac_reg[TPT];
600 s->mac_reg[GOTCL] = s->mac_reg[TOTL];
601 s->mac_reg[GOTCH] = s->mac_reg[TOTH];
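/*
 * Process a single transmit descriptor.  Context descriptors just latch
 * the offload parameters into s->tx.props; data and legacy descriptors
 * have their buffers DMAed into tp->data.  For TSE data descriptors the
 * payload is accumulated until one header plus MSS worth of data is
 * present and then cut into segments by xmit_seg(); everything else is
 * sent as a single frame once a descriptor with the EOP bit is reached.
 */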
604 static void
605 process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
607 PCIDevice *d = PCI_DEVICE(s);
608 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
609 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
610 unsigned int split_size = txd_lower & 0xffff, bytes, sz;
611 unsigned int msh = 0xfffff;
612 uint64_t addr;
613 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
614 struct e1000_tx *tp = &s->tx;
616 s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
617 if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
618 e1000x_read_tx_ctx_descr(xp, &tp->props);
619 tp->tso_frames = 0;
620 if (tp->props.tucso == 0) { /* this is probably wrong */
621 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
622 tp->props.tucso = tp->props.tucss + (tp->props.tcp ? 16 : 6);
624 return;
625 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
626 // data descriptor
627 if (tp->size == 0) {
628 tp->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8;
630 tp->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
631 } else {
632 // legacy descriptor
633 tp->props.cptse = 0;
636 if (e1000x_vlan_enabled(s->mac_reg) &&
637 e1000x_is_vlan_txd(txd_lower) &&
638 (tp->props.cptse || txd_lower & E1000_TXD_CMD_EOP)) {
639 tp->vlan_needed = 1;
640 stw_be_p(tp->vlan_header,
641 le16_to_cpu(s->mac_reg[VET]));
642 stw_be_p(tp->vlan_header + 2,
643 le16_to_cpu(dp->upper.fields.special));
646 addr = le64_to_cpu(dp->buffer_addr);
647 if (tp->props.tse && tp->props.cptse) {
648 msh = tp->props.hdr_len + tp->props.mss;
649 do {
650 bytes = split_size;
651 if (tp->size + bytes > msh)
652 bytes = msh - tp->size;
654 bytes = MIN(sizeof(tp->data) - tp->size, bytes);
655 pci_dma_read(d, addr, tp->data + tp->size, bytes);
656 sz = tp->size + bytes;
657 if (sz >= tp->props.hdr_len && tp->size < tp->props.hdr_len) {
658 memmove(tp->header, tp->data, tp->props.hdr_len);
660 tp->size = sz;
661 addr += bytes;
662 if (sz == msh) {
663 xmit_seg(s);
664 memmove(tp->data, tp->header, tp->props.hdr_len);
665 tp->size = tp->props.hdr_len;
667 split_size -= bytes;
668 } while (bytes && split_size);
669 } else if (!tp->props.tse && tp->props.cptse) {
670 // context descriptor TSE is not set, while data descriptor TSE is set
671 DBGOUT(TXERR, "TCP segmentation error\n");
672 } else {
673 split_size = MIN(sizeof(tp->data) - tp->size, split_size);
674 pci_dma_read(d, addr, tp->data + tp->size, split_size);
675 tp->size += split_size;
678 if (!(txd_lower & E1000_TXD_CMD_EOP))
679 return;
680 if (!(tp->props.tse && tp->props.cptse && tp->size < tp->props.hdr_len)) {
681 xmit_seg(s);
683 tp->tso_frames = 0;
684 tp->props.sum_needed = 0;
685 tp->vlan_needed = 0;
686 tp->size = 0;
687 tp->props.cptse = 0;
690 static uint32_t
691 txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
693 PCIDevice *d = PCI_DEVICE(s);
694 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
696 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
697 return 0;
698 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
699 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
700 dp->upper.data = cpu_to_le32(txd_upper);
701 pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
702 &dp->upper, sizeof(dp->upper));
703 return E1000_ICR_TXDW;
706 static uint64_t tx_desc_base(E1000State *s)
708 uint64_t bah = s->mac_reg[TDBAH];
709 uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
711 return (bah << 32) + bal;
714 static void
715 start_xmit(E1000State *s)
717 PCIDevice *d = PCI_DEVICE(s);
718 dma_addr_t base;
719 struct e1000_tx_desc desc;
720 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
722 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
723 DBGOUT(TX, "tx disabled\n");
724 return;
727 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
728 base = tx_desc_base(s) +
729 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
730 pci_dma_read(d, base, &desc, sizeof(desc));
732 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
733 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
734 desc.upper.data);
736 process_tx_desc(s, &desc);
737 cause |= txdesc_writeback(s, base, &desc);
739 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
740 s->mac_reg[TDH] = 0;
742 * the following could happen only if guest sw assigns
743 * bogus values to TDT/TDLEN.
744 * there's nothing too intelligent we could do about this.
746 if (s->mac_reg[TDH] == tdh_start ||
747 tdh_start >= s->mac_reg[TDLEN] / sizeof(desc)) {
748 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
749 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
750 break;
753 set_ics(s, 0, cause);
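/*
 * Decide whether an incoming frame should be accepted: drop tagged frames
 * whose VLAN id is not set in VFTA when VLAN filtering is enabled, then
 * accept according to the unicast/multicast promiscuous bits, the
 * broadcast-accept bit, and finally the perfect-match/multicast-table
 * group filter.
 */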
756 static int
757 receive_filter(E1000State *s, const uint8_t *buf, int size)
759 uint32_t rctl = s->mac_reg[RCTL];
760 int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1);
762 if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
763 e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
764 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
765 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
766 ((vid >> 5) & 0x7f));
767 if ((vfta & (1 << (vid & 0x1f))) == 0)
768 return 0;
771 if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */
772 return 1;
775 if (ismcast && (rctl & E1000_RCTL_MPE)) { /* promiscuous mcast */
776 e1000x_inc_reg_if_not_full(s->mac_reg, MPRC);
777 return 1;
780 if (isbcast && (rctl & E1000_RCTL_BAM)) { /* broadcast enabled */
781 e1000x_inc_reg_if_not_full(s->mac_reg, BPRC);
782 return 1;
785 return e1000x_rx_group_filter(s->mac_reg, buf);
788 static void
789 e1000_set_link_status(NetClientState *nc)
791 E1000State *s = qemu_get_nic_opaque(nc);
792 uint32_t old_status = s->mac_reg[STATUS];
794 if (nc->link_down) {
795 e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg);
796 } else {
797 if (have_autoneg(s) &&
798 !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
799 e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
800 } else {
801 e1000_link_up(s);
805 if (s->mac_reg[STATUS] != old_status)
806 set_ics(s, 0, E1000_ICR_LSC);
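/*
 * Check whether enough receive descriptors are available between RDH and
 * RDT (with ring wrap-around) to hold total_size bytes, given that each
 * descriptor points at an s->rxbuf_size sized buffer.
 */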
809 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
811 int bufs;
812 /* Fast-path short packets */
813 if (total_size <= s->rxbuf_size) {
814 return s->mac_reg[RDH] != s->mac_reg[RDT];
816 if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
817 bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
818 } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
819 bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
820 s->mac_reg[RDT] - s->mac_reg[RDH];
821 } else {
822 return false;
824 return total_size <= bufs * s->rxbuf_size;
827 static int
828 e1000_can_receive(NetClientState *nc)
830 E1000State *s = qemu_get_nic_opaque(nc);
832 return e1000x_rx_ready(&s->parent_obj, s->mac_reg) &&
833 e1000_has_rxbufs(s, 1);
836 static uint64_t rx_desc_base(E1000State *s)
838 uint64_t bah = s->mac_reg[RDBAH];
839 uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
841 return (bah << 32) + bal;
844 static ssize_t
845 e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
847 E1000State *s = qemu_get_nic_opaque(nc);
848 PCIDevice *d = PCI_DEVICE(s);
849 struct e1000_rx_desc desc;
850 dma_addr_t base;
851 unsigned int n, rdt;
852 uint32_t rdh_start;
853 uint16_t vlan_special = 0;
854 uint8_t vlan_status = 0;
855 uint8_t min_buf[MIN_BUF_SIZE];
856 struct iovec min_iov;
857 uint8_t *filter_buf = iov->iov_base;
858 size_t size = iov_size(iov, iovcnt);
859 size_t iov_ofs = 0;
860 size_t desc_offset;
861 size_t desc_size;
862 size_t total_size;
864 if (!e1000x_hw_rx_enabled(s->mac_reg)) {
865 return -1;
868 /* Pad to minimum Ethernet frame length */
869 if (size < sizeof(min_buf)) {
870 iov_to_buf(iov, iovcnt, 0, min_buf, size);
871 memset(&min_buf[size], 0, sizeof(min_buf) - size);
872 e1000x_inc_reg_if_not_full(s->mac_reg, RUC);
873 min_iov.iov_base = filter_buf = min_buf;
874 min_iov.iov_len = size = sizeof(min_buf);
875 iovcnt = 1;
876 iov = &min_iov;
877 } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
878 /* This is very unlikely, but may happen. */
879 iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
880 filter_buf = min_buf;
883 /* Discard oversized packets if !LPE and !SBP. */
884 if (e1000x_is_oversized(s->mac_reg, size)) {
885 return size;
888 if (!receive_filter(s, filter_buf, size)) {
889 return size;
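    /*
     * If VLAN stripping is enabled and the frame carries a tag, remember
     * the TCI for the descriptor's "special" field and strip the 4-byte
     * tag from the data that will be copied to the guest buffers.
     */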
892 if (e1000x_vlan_enabled(s->mac_reg) &&
893 e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
894 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
895 + 14)));
896 iov_ofs = 4;
897 if (filter_buf == iov->iov_base) {
898 memmove(filter_buf + 4, filter_buf, 12);
899 } else {
900 iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
901 while (iov->iov_len <= iov_ofs) {
902 iov_ofs -= iov->iov_len;
903 iov++;
906 vlan_status = E1000_RXD_STAT_VP;
907 size -= 4;
910 rdh_start = s->mac_reg[RDH];
911 desc_offset = 0;
912 total_size = size + e1000x_fcs_len(s->mac_reg);
913 if (!e1000_has_rxbufs(s, total_size)) {
914 set_ics(s, 0, E1000_ICS_RXO);
915 return -1;
917 do {
918 desc_size = total_size - desc_offset;
919 if (desc_size > s->rxbuf_size) {
920 desc_size = s->rxbuf_size;
922 base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
923 pci_dma_read(d, base, &desc, sizeof(desc));
924 desc.special = vlan_special;
925 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
926 if (desc.buffer_addr) {
927 if (desc_offset < size) {
928 size_t iov_copy;
929 hwaddr ba = le64_to_cpu(desc.buffer_addr);
930 size_t copy_size = size - desc_offset;
931 if (copy_size > s->rxbuf_size) {
932 copy_size = s->rxbuf_size;
934 do {
935 iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
936 pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
937 copy_size -= iov_copy;
938 ba += iov_copy;
939 iov_ofs += iov_copy;
940 if (iov_ofs == iov->iov_len) {
941 iov++;
942 iov_ofs = 0;
944 } while (copy_size);
946 desc_offset += desc_size;
947 desc.length = cpu_to_le16(desc_size);
948 if (desc_offset >= total_size) {
949 desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
950 } else {
951 /* Guest zeroing out status is not a hardware requirement.
952 Clear EOP in case guest didn't do it. */
953 desc.status &= ~E1000_RXD_STAT_EOP;
955 } else { // as per intel docs; skip descriptors with null buf addr
956 DBGOUT(RX, "Null RX descriptor!!\n");
958 pci_dma_write(d, base, &desc, sizeof(desc));
960 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
961 s->mac_reg[RDH] = 0;
962 /* see comment in start_xmit; same here */
963 if (s->mac_reg[RDH] == rdh_start ||
964 rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) {
965 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
966 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
967 set_ics(s, 0, E1000_ICS_RXO);
968 return -1;
970 } while (desc_offset < total_size);
972 e1000x_update_rx_total_stats(s->mac_reg, size, total_size);
974 n = E1000_ICS_RXT0;
975 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
976 rdt += s->mac_reg[RDLEN] / sizeof(desc);
977 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
978 s->rxbuf_min_shift)
979 n |= E1000_ICS_RXDMT0;
981 set_ics(s, 0, n);
983 return size;
986 static ssize_t
987 e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
989 const struct iovec iov = {
990 .iov_base = (uint8_t *)buf,
991 .iov_len = size
994 return e1000_receive_iov(nc, &iov, 1);
997 static uint32_t
998 mac_readreg(E1000State *s, int index)
1000 return s->mac_reg[index];
1003 static uint32_t
1004 mac_low4_read(E1000State *s, int index)
1006 return s->mac_reg[index] & 0xf;
1009 static uint32_t
1010 mac_low11_read(E1000State *s, int index)
1012 return s->mac_reg[index] & 0x7ff;
1015 static uint32_t
1016 mac_low13_read(E1000State *s, int index)
1018 return s->mac_reg[index] & 0x1fff;
1021 static uint32_t
1022 mac_low16_read(E1000State *s, int index)
1024 return s->mac_reg[index] & 0xffff;
1027 static uint32_t
1028 mac_icr_read(E1000State *s, int index)
1030 uint32_t ret = s->mac_reg[ICR];
1032 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
1033 set_interrupt_cause(s, 0, 0);
1034 return ret;
1037 static uint32_t
1038 mac_read_clr4(E1000State *s, int index)
1040 uint32_t ret = s->mac_reg[index];
1042 s->mac_reg[index] = 0;
1043 return ret;
1046 static uint32_t
1047 mac_read_clr8(E1000State *s, int index)
1049 uint32_t ret = s->mac_reg[index];
1051 s->mac_reg[index] = 0;
1052 s->mac_reg[index-1] = 0;
1053 return ret;
1056 static void
1057 mac_writereg(E1000State *s, int index, uint32_t val)
1059 uint32_t macaddr[2];
1061 s->mac_reg[index] = val;
1063 if (index == RA + 1) {
1064 macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
1065 macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
1066 qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
1070 static void
1071 set_rdt(E1000State *s, int index, uint32_t val)
1073 s->mac_reg[index] = val & 0xffff;
1074 if (e1000_has_rxbufs(s, 1)) {
1075 qemu_flush_queued_packets(qemu_get_queue(s->nic));
1079 static void
1080 set_16bit(E1000State *s, int index, uint32_t val)
1082 s->mac_reg[index] = val & 0xffff;
1085 static void
1086 set_dlen(E1000State *s, int index, uint32_t val)
1088 s->mac_reg[index] = val & 0xfff80;
1091 static void
1092 set_tctl(E1000State *s, int index, uint32_t val)
1094 s->mac_reg[index] = val;
1095 s->mac_reg[TDT] &= 0xffff;
1096 start_xmit(s);
1099 static void
1100 set_icr(E1000State *s, int index, uint32_t val)
1102 DBGOUT(INTERRUPT, "set_icr %x\n", val);
1103 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
1106 static void
1107 set_imc(E1000State *s, int index, uint32_t val)
1109 s->mac_reg[IMS] &= ~val;
1110 set_ics(s, 0, 0);
1113 static void
1114 set_ims(E1000State *s, int index, uint32_t val)
1116 s->mac_reg[IMS] |= val;
1117 set_ics(s, 0, 0);
1120 #define getreg(x) [x] = mac_readreg
1121 static uint32_t (*macreg_readops[])(E1000State *, int) = {
1122 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
1123 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
1124 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
1125 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
1126 getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
1127 getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
1128 getreg(TDLEN), getreg(RDLEN), getreg(RDTR), getreg(RADV),
1129 getreg(TADV), getreg(ITR), getreg(FCRUC), getreg(IPAV),
1130 getreg(WUC), getreg(WUS), getreg(SCC), getreg(ECOL),
1131 getreg(MCC), getreg(LATECOL), getreg(COLC), getreg(DC),
1132 getreg(TNCRS), getreg(SEC), getreg(CEXTERR), getreg(RLEC),
1133 getreg(XONRXC), getreg(XONTXC), getreg(XOFFRXC), getreg(XOFFTXC),
1134 getreg(RFC), getreg(RJC), getreg(RNBC), getreg(TSCTFC),
1135 getreg(MGTPRC), getreg(MGTPDC), getreg(MGTPTC), getreg(GORCL),
1136 getreg(GOTCL),
1138 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8,
1139 [GOTCH] = mac_read_clr8, [GORCH] = mac_read_clr8,
1140 [PRC64] = mac_read_clr4, [PRC127] = mac_read_clr4,
1141 [PRC255] = mac_read_clr4, [PRC511] = mac_read_clr4,
1142 [PRC1023] = mac_read_clr4, [PRC1522] = mac_read_clr4,
1143 [PTC64] = mac_read_clr4, [PTC127] = mac_read_clr4,
1144 [PTC255] = mac_read_clr4, [PTC511] = mac_read_clr4,
1145 [PTC1023] = mac_read_clr4, [PTC1522] = mac_read_clr4,
1146 [GPRC] = mac_read_clr4, [GPTC] = mac_read_clr4,
1147 [TPT] = mac_read_clr4, [TPR] = mac_read_clr4,
1148 [RUC] = mac_read_clr4, [ROC] = mac_read_clr4,
1149 [BPRC] = mac_read_clr4, [MPRC] = mac_read_clr4,
1150 [TSCTC] = mac_read_clr4, [BPTC] = mac_read_clr4,
1151 [MPTC] = mac_read_clr4,
1152 [ICR] = mac_icr_read, [EECD] = get_eecd,
1153 [EERD] = flash_eerd_read,
1154 [RDFH] = mac_low13_read, [RDFT] = mac_low13_read,
1155 [RDFHS] = mac_low13_read, [RDFTS] = mac_low13_read,
1156 [RDFPC] = mac_low13_read,
1157 [TDFH] = mac_low11_read, [TDFT] = mac_low11_read,
1158 [TDFHS] = mac_low13_read, [TDFTS] = mac_low13_read,
1159 [TDFPC] = mac_low13_read,
1160 [AIT] = mac_low16_read,
1162 [CRCERRS ... MPC] = &mac_readreg,
1163 [IP6AT ... IP6AT+3] = &mac_readreg, [IP4AT ... IP4AT+6] = &mac_readreg,
1164 [FFLT ... FFLT+6] = &mac_low11_read,
1165 [RA ... RA+31] = &mac_readreg,
1166 [WUPM ... WUPM+31] = &mac_readreg,
1167 [MTA ... MTA+127] = &mac_readreg,
1168 [VFTA ... VFTA+127] = &mac_readreg,
1169 [FFMT ... FFMT+254] = &mac_low4_read,
1170 [FFVT ... FFVT+254] = &mac_readreg,
1171 [PBM ... PBM+16383] = &mac_readreg,
1173 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1175 #define putreg(x) [x] = mac_writereg
1176 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
1177 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
1178 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
1179 putreg(RDBAL), putreg(LEDCTL), putreg(VET), putreg(FCRUC),
1180 putreg(TDFH), putreg(TDFT), putreg(TDFHS), putreg(TDFTS),
1181 putreg(TDFPC), putreg(RDFH), putreg(RDFT), putreg(RDFHS),
1182 putreg(RDFTS), putreg(RDFPC), putreg(IPAV), putreg(WUC),
1183 putreg(WUS), putreg(AIT),
1185 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
1186 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
1187 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
1188 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
1189 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
1190 [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
1191 [ITR] = set_16bit,
1193 [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg,
1194 [FFLT ... FFLT+6] = &mac_writereg,
1195 [RA ... RA+31] = &mac_writereg,
1196 [WUPM ... WUPM+31] = &mac_writereg,
1197 [MTA ... MTA+127] = &mac_writereg,
1198 [VFTA ... VFTA+127] = &mac_writereg,
1199 [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg,
1200 [PBM ... PBM+16383] = &mac_writereg,
1203 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1205 enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };
1207 #define markflag(x) ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
1208 /* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
1209 * f - flag bits (up to 6 possible flags)
1210 * n - flag needed
1211  * p - partially implemented */
1212 static const uint8_t mac_reg_access[0x8000] = {
1213 [RDTR] = markflag(MIT), [TADV] = markflag(MIT),
1214 [RADV] = markflag(MIT), [ITR] = markflag(MIT),
1216 [IPAV] = markflag(MAC), [WUC] = markflag(MAC),
1217 [IP6AT] = markflag(MAC), [IP4AT] = markflag(MAC),
1218 [FFVT] = markflag(MAC), [WUPM] = markflag(MAC),
1219 [ECOL] = markflag(MAC), [MCC] = markflag(MAC),
1220 [DC] = markflag(MAC), [TNCRS] = markflag(MAC),
1221 [RLEC] = markflag(MAC), [XONRXC] = markflag(MAC),
1222 [XOFFTXC] = markflag(MAC), [RFC] = markflag(MAC),
1223 [TSCTFC] = markflag(MAC), [MGTPRC] = markflag(MAC),
1224 [WUS] = markflag(MAC), [AIT] = markflag(MAC),
1225 [FFLT] = markflag(MAC), [FFMT] = markflag(MAC),
1226 [SCC] = markflag(MAC), [FCRUC] = markflag(MAC),
1227 [LATECOL] = markflag(MAC), [COLC] = markflag(MAC),
1228 [SEC] = markflag(MAC), [CEXTERR] = markflag(MAC),
1229 [XONTXC] = markflag(MAC), [XOFFRXC] = markflag(MAC),
1230 [RJC] = markflag(MAC), [RNBC] = markflag(MAC),
1231 [MGTPDC] = markflag(MAC), [MGTPTC] = markflag(MAC),
1232 [RUC] = markflag(MAC), [ROC] = markflag(MAC),
1233 [GORCL] = markflag(MAC), [GORCH] = markflag(MAC),
1234 [GOTCL] = markflag(MAC), [GOTCH] = markflag(MAC),
1235 [BPRC] = markflag(MAC), [MPRC] = markflag(MAC),
1236 [TSCTC] = markflag(MAC), [PRC64] = markflag(MAC),
1237 [PRC127] = markflag(MAC), [PRC255] = markflag(MAC),
1238 [PRC511] = markflag(MAC), [PRC1023] = markflag(MAC),
1239 [PRC1522] = markflag(MAC), [PTC64] = markflag(MAC),
1240 [PTC127] = markflag(MAC), [PTC255] = markflag(MAC),
1241 [PTC511] = markflag(MAC), [PTC1023] = markflag(MAC),
1242 [PTC1522] = markflag(MAC), [MPTC] = markflag(MAC),
1243 [BPTC] = markflag(MAC),
1245 [TDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1246 [TDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1247 [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1248 [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1249 [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1250 [RDFH] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1251 [RDFT] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1252 [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1253 [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1254 [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
1255 [PBM] = markflag(MAC) | MAC_ACCESS_PARTIAL,
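/*
 * MMIO dispatch: registers are 32 bits wide, so the register index is the
 * offset within the 128 KB BAR masked to 0x1ffff and shifted right by two.
 * Accesses to registers whose mac_reg_access[] entry requires a compat
 * flag are not performed when that flag is disabled.
 */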
1258 static void
1259 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1260 unsigned size)
1262 E1000State *s = opaque;
1263 unsigned int index = (addr & 0x1ffff) >> 2;
1265 if (index < NWRITEOPS && macreg_writeops[index]) {
1266 if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
1267 || (s->compat_flags & (mac_reg_access[index] >> 2))) {
1268 if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
1269 DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
1270 "It is not fully implemented.\n", index<<2);
1272 macreg_writeops[index](s, index, val);
1273 } else { /* "flag needed" bit is set, but the flag is not active */
1274 DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
1275 index<<2);
1277 } else if (index < NREADOPS && macreg_readops[index]) {
1278 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
1279 index<<2, val);
1280 } else {
1281 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1282 index<<2, val);
1286 static uint64_t
1287 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1289 E1000State *s = opaque;
1290 unsigned int index = (addr & 0x1ffff) >> 2;
1292 if (index < NREADOPS && macreg_readops[index]) {
1293 if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
1294 || (s->compat_flags & (mac_reg_access[index] >> 2))) {
1295 if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
1296 DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
1297 "It is not fully implemented.\n", index<<2);
1299 return macreg_readops[index](s, index);
1300 } else { /* "flag needed" bit is set, but the flag is not active */
1301 DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
1302 index<<2);
1304 } else {
1305 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1307 return 0;
1310 static const MemoryRegionOps e1000_mmio_ops = {
1311 .read = e1000_mmio_read,
1312 .write = e1000_mmio_write,
1313 .endianness = DEVICE_LITTLE_ENDIAN,
1314 .impl = {
1315 .min_access_size = 4,
1316 .max_access_size = 4,
1320 static uint64_t e1000_io_read(void *opaque, hwaddr addr,
1321 unsigned size)
1323 E1000State *s = opaque;
1325 (void)s;
1326 return 0;
1329 static void e1000_io_write(void *opaque, hwaddr addr,
1330 uint64_t val, unsigned size)
1332 E1000State *s = opaque;
1334 (void)s;
1337 static const MemoryRegionOps e1000_io_ops = {
1338 .read = e1000_io_read,
1339 .write = e1000_io_write,
1340 .endianness = DEVICE_LITTLE_ENDIAN,
1343 static bool is_version_1(void *opaque, int version_id)
1345 return version_id == 1;
1348 static void e1000_pre_save(void *opaque)
1350 E1000State *s = opaque;
1351 NetClientState *nc = qemu_get_queue(s->nic);
1353 /* If the mitigation timer is active, emulate a timeout now. */
1354 if (s->mit_timer_on) {
1355 e1000_mit_timer(s);
1359 * If link is down and auto-negotiation is supported and ongoing,
1360 * complete auto-negotiation immediately. This allows us to look
1361 * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
1363 if (nc->link_down && have_autoneg(s)) {
1364 s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
1368 static int e1000_post_load(void *opaque, int version_id)
1370 E1000State *s = opaque;
1371 NetClientState *nc = qemu_get_queue(s->nic);
1373 if (!chkflag(MIT)) {
1374 s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
1375 s->mac_reg[TADV] = 0;
1376 s->mit_irq_level = false;
1378 s->mit_ide = 0;
1379 s->mit_timer_on = false;
1381 /* nc.link_down can't be migrated, so infer link_down according
1382 * to link status bit in mac_reg[STATUS].
1383 * Alternatively, restart link negotiation if it was in progress. */
1384 nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
1386 if (have_autoneg(s) &&
1387 !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
1388 nc->link_down = false;
1389 timer_mod(s->autoneg_timer,
1390 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
1393 return 0;
1396 static bool e1000_mit_state_needed(void *opaque)
1398 E1000State *s = opaque;
1400 return chkflag(MIT);
1403 static bool e1000_full_mac_needed(void *opaque)
1405 E1000State *s = opaque;
1407 return chkflag(MAC);
1410 static const VMStateDescription vmstate_e1000_mit_state = {
1411 .name = "e1000/mit_state",
1412 .version_id = 1,
1413 .minimum_version_id = 1,
1414 .needed = e1000_mit_state_needed,
1415 .fields = (VMStateField[]) {
1416 VMSTATE_UINT32(mac_reg[RDTR], E1000State),
1417 VMSTATE_UINT32(mac_reg[RADV], E1000State),
1418 VMSTATE_UINT32(mac_reg[TADV], E1000State),
1419 VMSTATE_UINT32(mac_reg[ITR], E1000State),
1420 VMSTATE_BOOL(mit_irq_level, E1000State),
1421 VMSTATE_END_OF_LIST()
1425 static const VMStateDescription vmstate_e1000_full_mac_state = {
1426 .name = "e1000/full_mac_state",
1427 .version_id = 1,
1428 .minimum_version_id = 1,
1429 .needed = e1000_full_mac_needed,
1430 .fields = (VMStateField[]) {
1431 VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
1432 VMSTATE_END_OF_LIST()
1436 static const VMStateDescription vmstate_e1000 = {
1437 .name = "e1000",
1438 .version_id = 2,
1439 .minimum_version_id = 1,
1440 .pre_save = e1000_pre_save,
1441 .post_load = e1000_post_load,
1442 .fields = (VMStateField[]) {
1443 VMSTATE_PCI_DEVICE(parent_obj, E1000State),
1444 VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
1445 VMSTATE_UNUSED(4), /* Was mmio_base. */
1446 VMSTATE_UINT32(rxbuf_size, E1000State),
1447 VMSTATE_UINT32(rxbuf_min_shift, E1000State),
1448 VMSTATE_UINT32(eecd_state.val_in, E1000State),
1449 VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
1450 VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
1451 VMSTATE_UINT16(eecd_state.reading, E1000State),
1452 VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
1453 VMSTATE_UINT8(tx.props.ipcss, E1000State),
1454 VMSTATE_UINT8(tx.props.ipcso, E1000State),
1455 VMSTATE_UINT16(tx.props.ipcse, E1000State),
1456 VMSTATE_UINT8(tx.props.tucss, E1000State),
1457 VMSTATE_UINT8(tx.props.tucso, E1000State),
1458 VMSTATE_UINT16(tx.props.tucse, E1000State),
1459 VMSTATE_UINT32(tx.props.paylen, E1000State),
1460 VMSTATE_UINT8(tx.props.hdr_len, E1000State),
1461 VMSTATE_UINT16(tx.props.mss, E1000State),
1462 VMSTATE_UINT16(tx.size, E1000State),
1463 VMSTATE_UINT16(tx.tso_frames, E1000State),
1464 VMSTATE_UINT8(tx.props.sum_needed, E1000State),
1465 VMSTATE_INT8(tx.props.ip, E1000State),
1466 VMSTATE_INT8(tx.props.tcp, E1000State),
1467 VMSTATE_BUFFER(tx.header, E1000State),
1468 VMSTATE_BUFFER(tx.data, E1000State),
1469 VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
1470 VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
1471 VMSTATE_UINT32(mac_reg[CTRL], E1000State),
1472 VMSTATE_UINT32(mac_reg[EECD], E1000State),
1473 VMSTATE_UINT32(mac_reg[EERD], E1000State),
1474 VMSTATE_UINT32(mac_reg[GPRC], E1000State),
1475 VMSTATE_UINT32(mac_reg[GPTC], E1000State),
1476 VMSTATE_UINT32(mac_reg[ICR], E1000State),
1477 VMSTATE_UINT32(mac_reg[ICS], E1000State),
1478 VMSTATE_UINT32(mac_reg[IMC], E1000State),
1479 VMSTATE_UINT32(mac_reg[IMS], E1000State),
1480 VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
1481 VMSTATE_UINT32(mac_reg[MANC], E1000State),
1482 VMSTATE_UINT32(mac_reg[MDIC], E1000State),
1483 VMSTATE_UINT32(mac_reg[MPC], E1000State),
1484 VMSTATE_UINT32(mac_reg[PBA], E1000State),
1485 VMSTATE_UINT32(mac_reg[RCTL], E1000State),
1486 VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
1487 VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
1488 VMSTATE_UINT32(mac_reg[RDH], E1000State),
1489 VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
1490 VMSTATE_UINT32(mac_reg[RDT], E1000State),
1491 VMSTATE_UINT32(mac_reg[STATUS], E1000State),
1492 VMSTATE_UINT32(mac_reg[SWSM], E1000State),
1493 VMSTATE_UINT32(mac_reg[TCTL], E1000State),
1494 VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
1495 VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
1496 VMSTATE_UINT32(mac_reg[TDH], E1000State),
1497 VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
1498 VMSTATE_UINT32(mac_reg[TDT], E1000State),
1499 VMSTATE_UINT32(mac_reg[TORH], E1000State),
1500 VMSTATE_UINT32(mac_reg[TORL], E1000State),
1501 VMSTATE_UINT32(mac_reg[TOTH], E1000State),
1502 VMSTATE_UINT32(mac_reg[TOTL], E1000State),
1503 VMSTATE_UINT32(mac_reg[TPR], E1000State),
1504 VMSTATE_UINT32(mac_reg[TPT], E1000State),
1505 VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
1506 VMSTATE_UINT32(mac_reg[WUFC], E1000State),
1507 VMSTATE_UINT32(mac_reg[VET], E1000State),
1508 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
1509 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
1510 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
1511 VMSTATE_END_OF_LIST()
1513 .subsections = (const VMStateDescription*[]) {
1514 &vmstate_e1000_mit_state,
1515 &vmstate_e1000_full_mac_state,
1516 NULL
1521 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
1522 * Note: A valid DevId will be inserted during pci_e1000_init().
1524 static const uint16_t e1000_eeprom_template[64] = {
1525 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
1526 0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
1527 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
1528 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
1529 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
1530 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1531 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1532 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
1535 /* PCI interface */
1537 static void
1538 e1000_mmio_setup(E1000State *d)
1540 int i;
1541 const uint32_t excluded_regs[] = {
1542 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1543 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1546 memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
1547 "e1000-mmio", PNPMMIO_SIZE);
1548 memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
1549 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1550 memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1551 excluded_regs[i+1] - excluded_regs[i] - 4);
1552 memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
1555 static void
1556 pci_e1000_uninit(PCIDevice *dev)
1558 E1000State *d = E1000(dev);
1560 timer_del(d->autoneg_timer);
1561 timer_free(d->autoneg_timer);
1562 timer_del(d->mit_timer);
1563 timer_free(d->mit_timer);
1564 qemu_del_nic(d->nic);
1567 static NetClientInfo net_e1000_info = {
1568 .type = NET_CLIENT_OPTIONS_KIND_NIC,
1569 .size = sizeof(NICState),
1570 .can_receive = e1000_can_receive,
1571 .receive = e1000_receive,
1572 .receive_iov = e1000_receive_iov,
1573 .link_status_changed = e1000_set_link_status,
1576 static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
1577 uint32_t val, int len)
1579 E1000State *s = E1000(pci_dev);
1581 pci_default_write_config(pci_dev, address, val, len);
1583 if (range_covers_byte(address, len, PCI_COMMAND) &&
1584 (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
1585 qemu_flush_queued_packets(qemu_get_queue(s->nic));
1589 static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
1591 DeviceState *dev = DEVICE(pci_dev);
1592 E1000State *d = E1000(pci_dev);
1593 uint8_t *pci_conf;
1594 uint8_t *macaddr;
1596 pci_dev->config_write = e1000_write_config;
1598 pci_conf = pci_dev->config;
1600 /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1601 pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1603 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1605 e1000_mmio_setup(d);
1607 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1609 pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1611 qemu_macaddr_default_if_unset(&d->conf.macaddr);
1612 macaddr = d->conf.macaddr.a;
1614 e1000x_core_prepare_eeprom(d->eeprom_data,
1615 e1000_eeprom_template,
1616 sizeof(e1000_eeprom_template),
1617 PCI_DEVICE_GET_CLASS(pci_dev)->device_id,
1618 macaddr);
1620 d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1621 object_get_typename(OBJECT(d)), dev->id, d);
1623 qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
1625 d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
1626 d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
1629 static void qdev_e1000_reset(DeviceState *dev)
1631 E1000State *d = E1000(dev);
1632 e1000_reset(d);
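/*
 * The compat flags below default to on; they exist so that migration to a
 * QEMU version lacking the corresponding feature can be made to work by
 * turning the feature off on the command line, for example
 * "-device e1000,mitigation=off".
 */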
1635 static Property e1000_properties[] = {
1636 DEFINE_NIC_PROPERTIES(E1000State, conf),
1637 DEFINE_PROP_BIT("autonegotiation", E1000State,
1638 compat_flags, E1000_FLAG_AUTONEG_BIT, true),
1639 DEFINE_PROP_BIT("mitigation", E1000State,
1640 compat_flags, E1000_FLAG_MIT_BIT, true),
1641 DEFINE_PROP_BIT("extra_mac_registers", E1000State,
1642 compat_flags, E1000_FLAG_MAC_BIT, true),
1643 DEFINE_PROP_END_OF_LIST(),
1646 typedef struct E1000Info {
1647 const char *name;
1648 uint16_t device_id;
1649 uint8_t revision;
1650 uint16_t phy_id2;
1651 } E1000Info;
1653 static void e1000_class_init(ObjectClass *klass, void *data)
1655 DeviceClass *dc = DEVICE_CLASS(klass);
1656 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1657 E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
1658 const E1000Info *info = data;
1660 k->realize = pci_e1000_realize;
1661 k->exit = pci_e1000_uninit;
1662 k->romfile = "efi-e1000.rom";
1663 k->vendor_id = PCI_VENDOR_ID_INTEL;
1664 k->device_id = info->device_id;
1665 k->revision = info->revision;
1666 e->phy_id2 = info->phy_id2;
1667 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1668 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
1669 dc->desc = "Intel Gigabit Ethernet";
1670 dc->reset = qdev_e1000_reset;
1671 dc->vmsd = &vmstate_e1000;
1672 dc->props = e1000_properties;
1675 static void e1000_instance_init(Object *obj)
1677 E1000State *n = E1000(obj);
1678 device_add_bootindex_property(obj, &n->conf.bootindex,
1679 "bootindex", "/ethernet-phy@0",
1680 DEVICE(n), NULL);
1683 static const TypeInfo e1000_base_info = {
1684 .name = TYPE_E1000_BASE,
1685 .parent = TYPE_PCI_DEVICE,
1686 .instance_size = sizeof(E1000State),
1687 .instance_init = e1000_instance_init,
1688 .class_size = sizeof(E1000BaseClass),
1689 .abstract = true,
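/*
 * Concrete device models registered below.  The default "e1000" (82540EM)
 * is what "-device e1000" or "-net nic,model=e1000" selects; the 82544GC
 * and 82545EM variants can be chosen explicitly, e.g.
 * "-device e1000-82545em,netdev=net0".
 */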
1692 static const E1000Info e1000_devices[] = {
1694 .name = "e1000",
1695 .device_id = E1000_DEV_ID_82540EM,
1696 .revision = 0x03,
1697 .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
1700 .name = "e1000-82544gc",
1701 .device_id = E1000_DEV_ID_82544GC_COPPER,
1702 .revision = 0x03,
1703 .phy_id2 = E1000_PHY_ID2_82544x,
1706 .name = "e1000-82545em",
1707 .device_id = E1000_DEV_ID_82545EM_COPPER,
1708 .revision = 0x03,
1709 .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
1713 static void e1000_register_types(void)
1715 int i;
1717 type_register_static(&e1000_base_info);
1718 for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
1719 const E1000Info *info = &e1000_devices[i];
1720 TypeInfo type_info = {};
1722 type_info.name = info->name;
1723 type_info.parent = TYPE_E1000_BASE;
1724 type_info.class_data = (void *)info;
1725 type_info.class_init = e1000_class_init;
1726 type_info.instance_init = e1000_instance_init;
1728 type_register(&type_info);
1732 type_init(e1000_register_types)