/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw.h"
#include "pci/pci.h"
#include "net/net.h"
#include "net/checksum.h"
#include "loader.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"

#include "e1000_hw.h"

#define E1000_DEBUG

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ?         0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */        0xc20
};

typedef struct E1000State_st {
    PCIDevice dev;
    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;
} E1000State;

#define defreg(x)   x = (E1000_##x>>2)
enum {
    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
    defreg(TORH),    defreg(TORL),    defreg(TOTH),    defreg(TOTL),
    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),      defreg(MTA),     defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};

static void
e1000_link_down(E1000State *s)
{
    s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
}

static void
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
}
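
/*
 * A PHY_CTRL write with both "auto-negotiation enable" and "restart
 * auto-negotiation" set emulates a link renegotiation: the link is reported
 * down and autoneg incomplete, then brought back up by e1000_autoneg_timer()
 * after a fixed 500 ms delay (an emulation choice, not a datasheet value).
 * If the link is already administratively down, auto-negotiation is simply
 * reported complete and the link stays down.
 */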
static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
        /* no need for auto-negotiation if the link was already down */
        if (s->nic->nc.link_down) {
            s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
            return;
        }
        s->nic->nc.link_down = true;
        e1000_link_down(s);
        s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
        DBGOUT(PHY, "Start link auto negotiation\n");
        qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
    }
}

static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    s->nic->nc.link_down = false;
    e1000_link_up(s);
    s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    DBGOUT(PHY, "Auto negotiation is completed\n");
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up, auto-negotiation not yet complete */
    [PHY_ID1] = 0x141,                     [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,             [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,              [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
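
/*
 * Interrupt model: guest ICS writes and internal events funnel through
 * set_interrupt_cause(), which mirrors the cause bits into ICR/ICS and
 * raises or lowers the single PCI INTx line depending on whether any cause
 * bit left unmasked by IMS is set.
 */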
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
        /* Only for 8257x */
        val |= E1000_ICR_INT_ASSERTED;
    }
    s->mac_reg[ICR] = val;
    s->mac_reg[ICS] = val;
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
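
/*
 * Decode the receive buffer size from the RCTL.BSEX and RCTL.SZ_* bits.
 * Any combination not listed in the switch falls back to the 2048-byte
 * default.
 */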
static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    qemu_del_timer(d->autoneg_timer);
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (d->nic->nc.link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    qemu_flush_queued_packets(&s->nic->nc);
}
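
/*
 * MDIC is the MAC's window onto the PHY registers: the guest encodes the
 * PHY address, register number, opcode and data in one MDIC write.  Only
 * PHY #1 is modelled; reads return the shadow phy_reg[] contents, writes go
 * through phyreg_writeops[] when a handler is registered, and the READY bit
 * (plus an optional MDAC interrupt) reports completion.
 */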
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
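
/*
 * EECD models the Microwire EEPROM's serial interface.  The guest bit-bangs
 * chip select (CS), clock (SK) and data-in (DI) through EECD writes; the
 * device shifts the opcode and address in via set_eecd() and, once a read
 * command is recognized, shifts the addressed data word back out on DO via
 * get_eecd().
 */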
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))             // CS inactive; nothing to do
        return;
    if (E1000_EECD_CS & (val ^ oldval)) {   // CS rise edge; reset state
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))  // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {           // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
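
/*
 * EERD is the "fast" EEPROM read path: the guest writes an address with the
 * START bit set and then polls for DONE; the requested 16-bit word comes
 * back in the data field of the same register.  Addresses beyond
 * EEPROM_CHECKSUM_REG just report DONE with no data.
 */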
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}
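
/*
 * Compute an Internet checksum over data[css..cse] (cse == 0 means "to the
 * end of the packet") and store the 16-bit result, big-endian, at offset
 * sloc.  Used for both the IP header checksum and the TCP/UDP checksum.
 */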
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

/* FCS aka Ethernet CRC-32.  We don't get it from backends and can't fill it
 * in, so we just pad the reported descriptor length by 4 bytes unless the
 * guest told us to strip it off the packet. */
static inline int
fcs_len(E1000State *s)
{
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        s->nic->nc.info->receive(&s->nic->nc, buf, size);
    } else {
        qemu_send_packet(&s->nic->nc, buf, size);
    }
}
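
/*
 * Emit one segment of the current TX packet.  When TSO is active (TSE set
 * in both the context and data descriptors) the IP total/payload length and
 * IP identification are patched per segment, the TCP sequence number is
 * advanced by the bytes already sent, PSH/FIN are cleared on all but the
 * last segment, and the pseudo-header checksum is adjusted for the segment
 * length before the real checksum is inserted by putsum().
 */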
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {           // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else                  // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),     // seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;                   // PSH, FIN
        } else                  // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            cpu_to_be16wu(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
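
/*
 * Process a single TX descriptor.  Context descriptors just latch the
 * checksum/TSO parameters into s->tx; data and legacy descriptors append
 * the referenced buffer to tp->data.  With TSO active the payload is split
 * into header + mss sized segments, each flushed through xmit_seg(); the
 * complete packet is otherwise sent once the EOP descriptor arrives.
 */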
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {  // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {           // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(&s->dev, addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(&s->dev, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(&s->dev, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}
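
/*
 * Walk the TX ring from TDH to TDT, feeding each descriptor through
 * process_tx_desc() and writing back completion status.  A TDH wraparound
 * back to its starting value means the guest programmed bogus TDT/TDLEN
 * values, so the loop bails out rather than spinning forever.
 */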
static void
start_xmit(E1000State *s)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
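
/*
 * Receive filtering, roughly in the order the real MAC applies it: the VLAN
 * filter table (VFTA) when VLAN filtering is on, then unicast/multicast
 * promiscuous modes, broadcast acceptance, exact unicast matches against
 * the Receive Address registers, and finally the multicast hash table (MTA).
 * Returns nonzero if the frame should be accepted.
 */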
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: "
           "%02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}

static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        e1000_link_up(s);
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
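
/*
 * Check whether the RX ring has room for a packet of total_size bytes.
 * Packets that fit in a single receive buffer only need one free
 * descriptor; larger packets need enough free descriptors (RDT - RDH,
 * modulo the ring length) to cover total_size at rxbuf_size bytes each.
 */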
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
               s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}

static int
e1000_can_receive(NetClientState *nc)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;

    return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}
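
/*
 * RX path: pad runts up to the 60-byte minimum, drop oversized frames
 * unless LPE or SBP allows them, apply receive_filter(), strip the VLAN tag
 * when VLAN handling is enabled, then copy the frame into as many RX
 * descriptors as it needs, updating RDH and the receive statistics, and
 * finish by raising RXT0 (plus RXDMT0 once the ring runs low on
 * descriptors).
 */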
static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return -1;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        memcpy(min_buf, buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        buf = min_buf;
        size = sizeof(min_buf);
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((uint8_t *)buf + 4, buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr),
                              buf + desc_offset + vlan_offset, copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(&s->dev, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(&s->nic->nc);
    }
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
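
/*
 * MMIO dispatch tables, indexed by register offset >> 2 (see defreg above).
 * A NULL entry means the register is unimplemented; read-only registers
 * appear only in macreg_readops, so writes to them are logged and dropped.
 */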
#define getreg(x)   [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),     getreg(RCTL),    getreg(TDH),     getreg(TXDCTL),
    getreg(WUFC),    getreg(TDT),     getreg(CTRL),    getreg(LEDCTL),
    getreg(MANC),    getreg(MDIC),    getreg(SWSM),    getreg(STATUS),
    getreg(TORL),    getreg(TOTL),    getreg(IMS),     getreg(TCTL),
    getreg(RDH),     getreg(RDT),     getreg(VET),     getreg(ICS),
    getreg(TDBAL),   getreg(TDBAH),   getreg(RDBAH),   getreg(RDBAL),
    getreg(TDLEN),   getreg(RDLEN),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4,  [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,   [EECD] = get_eecd,      [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)   [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),     putreg(EERD),    putreg(SWSM),    putreg(WUFC),
    putreg(TDBAL),   putreg(TDBAH),   putreg(TXDCTL),  putreg(RDBAH),
    putreg(RDBAL),   putreg(LEDCTL),  putreg(VET),
    [TDLEN] = set_dlen,  [RDLEN] = set_dlen,       [TCTL] = set_tctl,
    [TDT] = set_tctl,    [MDIC] = set_mdic,        [ICS] = set_ics,
    [TDH] = set_16bit,   [RDH] = set_16bit,        [RDT] = set_rdt,
    [IMC] = set_imc,     [IMS] = set_ims,          [ICR] = set_icr,
    [EECD] = set_eecd,   [RCTL] = set_rx_control,  [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        macreg_writeops[index](s, index, val);
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}

static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        return macreg_readops[index](s, index);
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}

static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}

static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS] */
    s->nic->nc.link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    return 0;
}

static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = e1000_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    }
};

static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

/* PCI interface */
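
/*
 * Register the MMIO BAR contents.  Most of the register space is marked for
 * MMIO coalescing so accesses can be batched, but registers with read or
 * write side effects that must be seen immediately (MDIC, the interrupt
 * registers, TCTL/TDT) are left out of the coalesced ranges.
 */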
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, &e1000_mmio_ops, d, "e1000-mmio",
                          PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}

static void
e1000_cleanup(NetClientState *nc)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;

    s->nic = NULL;
}

static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, dev);

    qemu_del_timer(d->autoneg_timer);
    qemu_free_timer(d->autoneg_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_net_client(&d->nic->nc);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
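
/*
 * Device init: set up PCI config space and the MMIO/IO BARs, seed the
 * EEPROM image with the configured MAC address in words 0-2, and fix up
 * the checksum word so the EEPROM words sum to EEPROM_SUM, which drivers
 * use to validate the EEPROM contents.
 */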
static int pci_e1000_init(PCIDevice *pci_dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = d->dev.config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(&d->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), d->dev.qdev.id, d);

    qemu_format_nic_info_str(&d->nic->nc, macaddr);

    add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");

    d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);

    return 0;
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
    e1000_reset(d);
}

static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = pci_e1000_init;
    k->exit = pci_e1000_uninit;
    k->romfile = "pxe-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = E1000_DEVID;
    k->revision = 0x03;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}

static const TypeInfo e1000_info = {
    .name          = "e1000",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_init    = e1000_class_init,
};

static void e1000_register_types(void)
{
    type_register_static(&e1000_info);
}

type_init(e1000_register_types)