/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw.h"
#include "pci/pci.h"
#include "net/net.h"
#include "net/checksum.h"
#include "loader.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"

#include "e1000_hw.h"

#define E1000_DEBUG

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL,      DEBUG_IO,       DEBUG_MMIO,     DEBUG_INTERRUPT,
    DEBUG_RX,           DEBUG_TX,       DEBUG_MDIC,     DEBUG_EEPROM,
    DEBUG_UNKNOWN,      DEBUG_TXSUM,    DEBUG_TXERR,    DEBUG_RXERR,
    DEBUG_RXFILTER,     DEBUG_PHY,      DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
 *   appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ?         0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */        0xc20
};

typedef struct E1000State_st {
    PCIDevice dev;
    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;             // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;        // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;
} E1000State;

#define defreg(x)    x = (E1000_##x>>2)
enum {
    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
    defreg(TORH),    defreg(TORL),    defreg(TOTH),    defreg(TOTL),
    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),      defreg(MTA),     defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};

static void
e1000_link_down(E1000State *s)
{
    s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
}

static void
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
}
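
/*
 * A PHY_CTRL write that requests an auto-negotiation restart is emulated by
 * taking the link down, clearing the autoneg-complete bit and bringing the
 * link back up from a 500 ms one-shot timer (e1000_autoneg_timer below).
 */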

static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
        e1000_link_down(s);
        s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
        DBGOUT(PHY, "Start link auto negotiation\n");
        qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
    }
}

static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    if (!qemu_get_queue(s->nic)->link_down) {
        e1000_link_up(s);
    }
    s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    DBGOUT(PHY, "Auto negotiation is completed\n");
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141,                      [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,              [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,  [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,               [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
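
/* Recompute the interrupt state: mirror the pending causes into ICR/ICS and
 * drive the legacy INTx line from the bits that are also unmasked in IMS. */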
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
        /* Only for 8257x */
        val |= E1000_ICR_INT_ASSERTED;
    }
    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
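
/* Decode the RCTL buffer-size bits (BSEX plus SZ_*) into a byte count;
 * unrecognised combinations fall back to the 2048-byte default. */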
static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    qemu_del_timer(d->autoneg_timer);
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
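
/* MDIC: MDIO access to the single emulated PHY (address 1).  Reads and writes
 * are checked against phy_regcap; PHY_CTRL writes additionally go through
 * phyreg_writeops so auto-negotiation can be emulated. */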
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
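
/* EECD bit-bang interface: the guest clocks the bits of a Microwire EEPROM
 * command in through EECD.DI and reads result bits back on EECD.DO.
 * get_eecd/set_eecd implement that serial state machine. */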
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))                 // CS inactive; nothing to do
        return;
    if (E1000_EECD_CS & (val ^ oldval)) {       // CS rise edge; reset state
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))      // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {               // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}

static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}
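
/* Compute a 16-bit Internet checksum over data[css..cse] (cse == 0 means "to
 * the end of the packet") and store it at offset sloc, as requested by the
 * checksum offload context. */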
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

/* FCS aka Ethernet CRC-32. We don't get it from backends and can't
 * fill it in, just pad descriptor length by 4 bytes unless guest
 * told us to strip it off the packet. */
static inline int
fcs_len(E1000State *s)
{
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}
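
/* Emit one segment from the TX buffer: for TSO frames patch the IP total
 * length/identification and TCP sequence/flags for this segment, apply any
 * requested TCP/UDP and IP checksum offloads, prepend the saved VLAN header
 * if needed, then hand the frame to e1000_send_packet(). */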
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {           // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else                  // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),     // seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;                   // PSH, FIN
        } else                  // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            cpu_to_be16wu(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
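
/* Consume one TX descriptor: a context descriptor only latches the offload
 * parameters, a data descriptor DMAs payload into the TX buffer (segmenting
 * at hdr_len + mss boundaries when TSO is active), and a legacy descriptor is
 * sent as-is.  The accumulated frame is flushed once EOP is seen. */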
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {  // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {   // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(&s->dev, addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(&s->dev, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(&s->dev, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}
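
/* Walk the TX ring from TDH towards TDT, processing and writing back each
 * descriptor, then raise the accumulated interrupt causes via set_ics(). */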
static void
start_xmit(E1000State *s)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
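
/* Decide whether an incoming frame should be accepted: check the VLAN filter
 * table, then promiscuous/broadcast settings, the unicast receive-address
 * (RA) registers and finally the multicast table array (MTA) hash. */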
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}

static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        e1000_link_up(s);
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
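
/* Check whether the RX ring has enough free descriptors between RDH and RDT
 * to hold a packet of total_size bytes at the current buffer size. */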
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
            s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}

static int
e1000_can_receive(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
        (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}
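
/* Receive path: pad runts, drop oversized frames (unless LPE/SBP allow them),
 * apply the receive filter, optionally strip the VLAN tag, then DMA the frame
 * into as many RX descriptors as needed and raise RXT0/RXDMT0/RXO causes. */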
static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        memcpy(min_buf, buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        buf = min_buf;
        size = sizeof(min_buf);
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((uint8_t *)buf + 4, buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr),
                              buf + desc_offset + vlan_offset, copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(&s->dev, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
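
/* MMIO dispatch tables, indexed by register offset >> 2: each entry points at
 * the accessor implementing that register's read or write side effects. */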
#define getreg(x)    [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),     getreg(RCTL),    getreg(TDH),     getreg(TXDCTL),
    getreg(WUFC),    getreg(TDT),     getreg(CTRL),    getreg(LEDCTL),
    getreg(MANC),    getreg(MDIC),    getreg(SWSM),    getreg(STATUS),
    getreg(TORL),    getreg(TOTL),    getreg(IMS),     getreg(TCTL),
    getreg(RDH),     getreg(RDT),     getreg(VET),     getreg(ICS),
    getreg(TDBAL),   getreg(TDBAH),   getreg(RDBAH),   getreg(RDBAL),
    getreg(TDLEN),   getreg(RDLEN),

    [TOTH] = mac_read_clr8,  [TORH] = mac_read_clr8,  [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,  [TPR] = mac_read_clr4,   [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,    [EECD] = get_eecd,       [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)    [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),     putreg(EERD),    putreg(SWSM),    putreg(WUFC),
    putreg(TDBAL),   putreg(TDBAH),   putreg(TXDCTL),  putreg(RDBAH),
    putreg(RDBAL),   putreg(LEDCTL),  putreg(VET),
    [TDLEN] = set_dlen,   [RDLEN] = set_dlen,       [TCTL] = set_tctl,
    [TDT] = set_tctl,     [MDIC] = set_mdic,        [ICS] = set_ics,
    [TDH] = set_16bit,    [RDH] = set_16bit,        [RDT] = set_rdt,
    [IMC] = set_imc,      [IMS] = set_ims,          [ICR] = set_icr,
    [EECD] = set_eecd,    [RCTL] = set_rx_control,  [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        macreg_writeops[index](s, index, val);
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}

static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        return macreg_readops[index](s, index);
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}

static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}

static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);
    /*
     * If link is down and auto-negotiation is ongoing, complete
     * auto-negotiation immediately.  This allows us to look at
     * MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}

static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
    }

    return 0;
}
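
/* Migration stream layout: PCI state, EEPROM bit-bang state, in-flight TX
 * offload context, and the PHY/EEPROM/MAC register contents. */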
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    }
};

static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

/* PCI interface */
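
/* Register the MMIO BAR as coalesced MMIO, but leave holes around registers
 * with immediate side effects (interrupt registers, TDT, TCTL, MDIC) so that
 * writes to them are not delayed by coalescing. */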
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, &e1000_mmio_ops, d, "e1000-mmio",
                          PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}

static void
e1000_cleanup(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    s->nic = NULL;
}

static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, dev);

    qemu_del_timer(d->autoneg_timer);
    qemu_free_timer(d->autoneg_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_nic(d->nic);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};

static int pci_e1000_init(PCIDevice *pci_dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = d->dev.config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(&d->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), d->dev.qdev.id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");

    d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);

    return 0;
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
    e1000_reset(d);
}

static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = pci_e1000_init;
    k->exit = pci_e1000_uninit;
    k->romfile = "pxe-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = E1000_DEVID;
    k->revision = 0x03;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}

static const TypeInfo e1000_info = {
    .name          = "e1000",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_init    = e1000_class_init,
};

static void e1000_register_types(void)
{
    type_register_static(&e1000_info);
}

type_init(e1000_register_types)