/* qemu.git: hw/e1000.c */

/*
 * QEMU e1000 emulation
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw.h"
#include "pci.h"
#include "net.h"
#include "e1000_hw.h"

#define DEBUG

#ifdef DEBUG
enum {
    DEBUG_GENERAL,  DEBUG_IO,     DEBUG_MMIO,  DEBUG_INTERRUPT,
    DEBUG_RX,       DEBUG_TX,     DEBUG_MDIC,  DEBUG_EEPROM,
    DEBUG_UNKNOWN,  DEBUG_TXSUM,  DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ?        0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */        0xc20
};

typedef struct E1000State_st {
    PCIDevice dev;
    VLANClientState *vc;
    int mmio_index;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    int check_rxov;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
} E1000State;

#define defreg(x)    x = (E1000_##x>>2)
enum {
    defreg(CTRL),   defreg(EECD),   defreg(EERD),    defreg(GPRC),
    defreg(GPTC),   defreg(ICR),    defreg(ICS),     defreg(IMC),
    defreg(IMS),    defreg(LEDCTL), defreg(MANC),    defreg(MDIC),
    defreg(MPC),    defreg(PBA),    defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),  defreg(RDH),    defreg(RDLEN),   defreg(RDT),
    defreg(STATUS), defreg(SWSM),   defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),  defreg(TDH),    defreg(TDLEN),   defreg(TDT),
    defreg(TORH),   defreg(TORL),   defreg(TOTH),    defreg(TOTL),
    defreg(TPR),    defreg(TPT),    defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),     defreg(MTA),    defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static void
ioport_map(PCIDevice *pci_dev, int region_num, uint32_t addr,
           uint32_t size, int type)
{
    DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
}
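
/*
 * Interrupt plumbing: ICR and ICS mirror the currently pending causes,
 * and the PCI INTx line is recomputed from (IMS & ICR) on every update;
 * set_ics() ORs new cause bits into whatever is already pending.
 */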

static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val)
        val |= E1000_ICR_INT_ASSERTED;
    s->mac_reg[ICR] = val;
    s->mac_reg[ICS] = val;
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}

static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
}

static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            s->phy_reg[addr] = data;
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;
    set_ics(s, 0, E1000_ICR_MDAC);
}
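
/*
 * EEPROM access is bit-banged through EECD: set_eecd() shifts the guest's
 * Microwire opcode and address in via eecd_state, and once a READ command
 * has been recognized, get_eecd() shifts eeprom_data[] back out on the DO
 * bit, one bit per falling clock edge.
 */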

static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_SK & (val ^ oldval)))      // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {               // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    if (!(val & E1000_EECD_CS)) {               // rising, no CS (EEPROM reset)
        memset(&s->eecd_state, 0, sizeof s->eecd_state);
        /*
         * restore old_eecd's E1000_EECD_SK (known to be on)
         * to avoid false detection of a clock edge
         */
        s->eecd_state.old_eecd = E1000_EECD_SK;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}

static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}
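
/*
 * putsum() folds a 16-bit Internet checksum over data[css..cse] (a cse of
 * zero means "through the end of the packet") and stores it big-endian at
 * offset sloc, as directed by the offload fields of the TX descriptors.
 */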

static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
                le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}
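
/*
 * xmit_seg() finishes one outgoing frame: under TSO it patches the IP
 * total/payload length, IP identification and TCP sequence number of the
 * current segment; it then applies any requested IP/TCP/UDP checksum
 * insertion, re-inserts the VLAN tag if needed, and hands the frame to
 * the VLAN layer.
 */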

static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {           // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else                  // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),   // seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;                 // PSH, FIN
        } else                  // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 12);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
    } else
        qemu_send_packet(s->vc, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
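
/*
 * process_tx_desc() handles the three TX descriptor flavours: a context
 * descriptor latches checksum/TSO offload parameters into s->tx, a data
 * descriptor supplies packet bytes (split into mss-sized segments when
 * TSE is set), and a legacy descriptor is treated as plain packet data.
 */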

static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {          // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {   // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else
        // legacy descriptor
        tp->cptse = 0;

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;
            cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
                              (void *)&dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
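
/*
 * start_xmit() walks the TX ring from TDH to TDT, reading each descriptor
 * out of guest memory, processing it and writing back its status; the
 * wraparound check only triggers if the guest programmed bogus TDT/TDLEN
 * values.
 */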

static void
start_xmit(E1000State *s)
{
    target_phys_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
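
/*
 * receive_filter() applies the RX filters in order: the VLAN filter table
 * (when VFE is set), the unicast/multicast promiscuous bits, broadcast
 * acceptance, the 16 exact receive-address (RAL/RAH) entries, and finally
 * the 4096-bit multicast table array indexed according to RCTL.MO.
 */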

static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}

static void
e1000_set_link_status(VLANClientState *vc)
{
    E1000State *s = vc->opaque;
    uint32_t old_status = s->mac_reg[STATUS];

    if (vc->link_down)
        s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    else
        s->mac_reg[STATUS] |= E1000_STATUS_LU;

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}

static int
e1000_can_receive(VLANClientState *vc)
{
    E1000State *s = vc->opaque;

    return (s->mac_reg[RCTL] & E1000_RCTL_EN);
}
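
/*
 * e1000_receive() filters the incoming frame, strips and records its VLAN
 * tag when VLAN handling is enabled, copies the data into the guest buffer
 * of the next RX descriptor (RDH..RDT), writes the descriptor status back,
 * and raises RXT0/RXDMT0/RXO interrupt causes as appropriate.
 */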

static ssize_t
e1000_receive(VLANClientState *vc, const uint8_t *buf, size_t size)
{
    E1000State *s = vc->opaque;
    struct e1000_rx_desc desc;
    target_phys_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return -1;

    if (size > s->rxbuf_size) {
        DBGOUT(RX, "packet too large for buffers (%lu > %d)\n",
               (unsigned long)size, s->rxbuf_size);
        return -1;
    }

    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((void *)(buf + 4), buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    size += 4; // for the header
    do {
        if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
        base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
               sizeof(desc) * s->mac_reg[RDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
                                      (void *)(buf + vlan_offset), size);
            desc.length = cpu_to_le16(size);
            desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
        } else // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        s->check_rxov = 1;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc.buffer_addr == 0);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    n = s->mac_reg[TORL];
    if ((s->mac_reg[TORL] += size) < n)
        s->mac_reg[TORH]++;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->check_rxov = 0;
    s->mac_reg[index] = val & 0xffff;
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
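
/*
 * Register dispatch: both tables are indexed by the register offset >> 2
 * (the defreg() values above); a NULL slot means the register is not
 * readable/writable and falls through to the debug paths in
 * e1000_mmio_readl()/e1000_mmio_writel().
 */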

#define getreg(x)    [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),  getreg(RCTL), getreg(TDH),  getreg(TXDCTL),
    getreg(WUFC), getreg(TDT),  getreg(CTRL), getreg(LEDCTL),
    getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
    getreg(TORL), getreg(TOTL), getreg(IMS),  getreg(TCTL),
    getreg(RDH),  getreg(RDT),  getreg(VET),  getreg(ICS),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4,  [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,   [EECD] = get_eecd,      [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)    [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),   putreg(EERD),   putreg(SWSM),   putreg(WUFC),
    putreg(TDBAL), putreg(TDBAH),  putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL), putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen, [RDLEN] = set_dlen,       [TCTL] = set_tctl,
    [TDT] = set_tctl,   [MDIC] = set_mdic,        [ICS] = set_ics,
    [TDH] = set_16bit,  [RDH] = set_16bit,        [RDT] = set_rdt,
    [IMC] = set_imc,    [IMS] = set_ims,          [ICR] = set_icr,
    [EECD] = set_eecd,  [RCTL] = set_rx_control,  [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

static void
e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

#ifdef TARGET_WORDS_BIGENDIAN
    val = bswap32(val);
#endif
    if (index < NWRITEOPS && macreg_writeops[index])
        macreg_writeops[index](s, index, val);
    else if (index < NREADOPS && macreg_readops[index])
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
    else
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
               index<<2, val);
}

static void
e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xffff) << (8*(addr & 3)));
}

static void
e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xff) << (8*(addr & 3)));
}

static uint32_t
e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index])
    {
        uint32_t val = macreg_readops[index](s, index);
#ifdef TARGET_WORDS_BIGENDIAN
        val = bswap32(val);
#endif
        return val;
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static uint32_t
e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xff;
}

static uint32_t
e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xffff;
}

static const int mac_regtosave[] = {
    CTRL,   EECD,  EERD,   GPRC,  GPTC,  ICR,    ICS,    IMC,   IMS,
    LEDCTL, MANC,  MDIC,   MPC,   PBA,   RCTL,   RDBAH,  RDBAL, RDH,
    RDLEN,  RDT,   STATUS, SWSM,  TCTL,  TDBAH,  TDBAL,  TDH,   TDLEN,
    TDT,    TORH,  TORL,   TOTH,  TOTL,  TPR,    TPT,    TXDCTL, WUFC,
    VET,
};
enum { MAC_NSAVE = ARRAY_SIZE(mac_regtosave) };

static const struct {
    int size;
    int array0;
} mac_regarraystosave[] = { {32, RA}, {128, MTA}, {128, VFTA} };
enum { MAC_NARRAYS = ARRAY_SIZE(mac_regarraystosave) };

static void
nic_save(QEMUFile *f, void *opaque)
{
    E1000State *s = opaque;
    int i, j;

    pci_device_save(&s->dev, f);
    qemu_put_be32(f, 0);
    qemu_put_be32s(f, &s->rxbuf_size);
    qemu_put_be32s(f, &s->rxbuf_min_shift);
    qemu_put_be32s(f, &s->eecd_state.val_in);
    qemu_put_be16s(f, &s->eecd_state.bitnum_in);
    qemu_put_be16s(f, &s->eecd_state.bitnum_out);
    qemu_put_be16s(f, &s->eecd_state.reading);
    qemu_put_be32s(f, &s->eecd_state.old_eecd);
    qemu_put_8s(f, &s->tx.ipcss);
    qemu_put_8s(f, &s->tx.ipcso);
    qemu_put_be16s(f, &s->tx.ipcse);
    qemu_put_8s(f, &s->tx.tucss);
    qemu_put_8s(f, &s->tx.tucso);
    qemu_put_be16s(f, &s->tx.tucse);
    qemu_put_be32s(f, &s->tx.paylen);
    qemu_put_8s(f, &s->tx.hdr_len);
    qemu_put_be16s(f, &s->tx.mss);
    qemu_put_be16s(f, &s->tx.size);
    qemu_put_be16s(f, &s->tx.tso_frames);
    qemu_put_8s(f, &s->tx.sum_needed);
    qemu_put_s8s(f, &s->tx.ip);
    qemu_put_s8s(f, &s->tx.tcp);
    qemu_put_buffer(f, s->tx.header, sizeof s->tx.header);
    qemu_put_buffer(f, s->tx.data, sizeof s->tx.data);
    for (i = 0; i < 64; i++)
        qemu_put_be16s(f, s->eeprom_data + i);
    for (i = 0; i < 0x20; i++)
        qemu_put_be16s(f, s->phy_reg + i);
    for (i = 0; i < MAC_NSAVE; i++)
        qemu_put_be32s(f, s->mac_reg + mac_regtosave[i]);
    for (i = 0; i < MAC_NARRAYS; i++)
        for (j = 0; j < mac_regarraystosave[i].size; j++)
            qemu_put_be32s(f,
                           s->mac_reg + mac_regarraystosave[i].array0 + j);
}

static int
nic_load(QEMUFile *f, void *opaque, int version_id)
{
    E1000State *s = opaque;
    int i, j, ret;

    if ((ret = pci_device_load(&s->dev, f)) < 0)
        return ret;
    if (version_id == 1)
        qemu_get_sbe32s(f, &i); /* once some unused instance id */
    qemu_get_be32(f); /* Ignored.  Was mmio_base. */
    qemu_get_be32s(f, &s->rxbuf_size);
    qemu_get_be32s(f, &s->rxbuf_min_shift);
    qemu_get_be32s(f, &s->eecd_state.val_in);
    qemu_get_be16s(f, &s->eecd_state.bitnum_in);
    qemu_get_be16s(f, &s->eecd_state.bitnum_out);
    qemu_get_be16s(f, &s->eecd_state.reading);
    qemu_get_be32s(f, &s->eecd_state.old_eecd);
    qemu_get_8s(f, &s->tx.ipcss);
    qemu_get_8s(f, &s->tx.ipcso);
    qemu_get_be16s(f, &s->tx.ipcse);
    qemu_get_8s(f, &s->tx.tucss);
    qemu_get_8s(f, &s->tx.tucso);
    qemu_get_be16s(f, &s->tx.tucse);
    qemu_get_be32s(f, &s->tx.paylen);
    qemu_get_8s(f, &s->tx.hdr_len);
    qemu_get_be16s(f, &s->tx.mss);
    qemu_get_be16s(f, &s->tx.size);
    qemu_get_be16s(f, &s->tx.tso_frames);
    qemu_get_8s(f, &s->tx.sum_needed);
    qemu_get_s8s(f, &s->tx.ip);
    qemu_get_s8s(f, &s->tx.tcp);
    qemu_get_buffer(f, s->tx.header, sizeof s->tx.header);
    qemu_get_buffer(f, s->tx.data, sizeof s->tx.data);
    for (i = 0; i < 64; i++)
        qemu_get_be16s(f, s->eeprom_data + i);
    for (i = 0; i < 0x20; i++)
        qemu_get_be16s(f, s->phy_reg + i);
    for (i = 0; i < MAC_NSAVE; i++)
        qemu_get_be32s(f, s->mac_reg + mac_regtosave[i]);
    for (i = 0; i < MAC_NARRAYS; i++)
        for (j = 0; j < mac_regarraystosave[i].size; j++)
            qemu_get_be32s(f,
                           s->mac_reg + mac_regarraystosave[i].array0 + j);
    return 0;
}

static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,                   [PHY_STATUS] = 0x796d, // link initially up
    [PHY_ID1] = 0x141,                     [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,             [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,              [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA]    = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL]   = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
               E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC]   = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
               E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
               E1000_MANC_RMCP_EN,
};

/* PCI interface */

static CPUWriteMemoryFunc * const e1000_mmio_write[] = {
    e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
};

static CPUReadMemoryFunc * const e1000_mmio_read[] = {
    e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
};
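
/*
 * Most of the MMIO BAR is registered as coalesced MMIO so that batches of
 * plain register writes need not exit to the device model one by one; the
 * registers in excluded_regs (MDIC, the interrupt registers, TCTL and TDT)
 * have immediate side effects and are left out of the coalesced ranges.
 */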

static void
e1000_mmio_map(PCIDevice *pci_dev, int region_num,
               uint32_t addr, uint32_t size, int type)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);

    cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
    qemu_register_coalesced_mmio(addr, excluded_regs[0]);

    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
                                     excluded_regs[i + 1] -
                                     excluded_regs[i] - 4);
}

static void
e1000_cleanup(VLANClientState *vc)
{
    E1000State *d = vc->opaque;

    unregister_savevm("e1000", d);
}

static int
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, dev);

    cpu_unregister_io_memory(d->mmio_index);

    return 0;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;

    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);
}

static int pci_e1000_init(PCIDevice *pci_dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    static const char info_str[] = "e1000";
    int i;
    uint8_t macaddr[6];

    pci_conf = d->dev.config;

    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
    pci_config_set_device_id(pci_conf, E1000_DEVID);
    *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
    *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
    pci_conf[0x08] = 0x03;
    pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
    pci_conf[0x0c] = 0x10;

    pci_conf[0x3d] = 1; // interrupt pin 0

    d->mmio_index = cpu_register_io_memory(e1000_mmio_read,
                                           e1000_mmio_write, d);

    pci_register_bar((PCIDevice *)d, 0, PNPMMIO_SIZE,
                     PCI_ADDRESS_SPACE_MEM, e1000_mmio_map);

    pci_register_bar((PCIDevice *)d, 1, IOPORT_SIZE,
                     PCI_ADDRESS_SPACE_IO, ioport_map);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qdev_get_macaddr(&d->dev.qdev, macaddr);
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->vc = qdev_get_vlan_client(&d->dev.qdev,
                                 e1000_can_receive, e1000_receive,
                                 NULL, e1000_cleanup, d);
    d->vc->link_status_changed = e1000_set_link_status;

    qemu_format_nic_info_str(d->vc, macaddr);

    register_savevm(info_str, -1, 2, nic_save, nic_load, d);
    d->dev.unregister = pci_e1000_uninit;
    qemu_register_reset(e1000_reset, d);
    e1000_reset(d);
    return 0;
}

static PCIDeviceInfo e1000_info = {
    .qdev.name = "e1000",
    .qdev.size = sizeof(E1000State),
    .init      = pci_e1000_init,
};

static void e1000_register_devices(void)
{
    pci_qdev_register(&e1000_info);
}

device_init(e1000_register_devices)