qemu.git: hw/net/sungem.c
/*
 * QEMU model of SUN GEM ethernet controller
 *
 * As found in Apple ASICs among others
 *
 * Copyright 2016 Ben Herrenschmidt
 * Copyright 2017 Mark Cave-Ayland
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "hw/net/mii.h"
#include "sysemu/sysemu.h"
#include "trace.h"

#define TYPE_SUNGEM "sungem"

#define SUNGEM(obj) OBJECT_CHECK(SunGEMState, (obj), TYPE_SUNGEM)

#define MAX_PACKET_SIZE 9016

#define SUNGEM_MMIO_SIZE       0x200000

/* Global registers */
#define SUNGEM_MMIO_GREG_SIZE  0x2000

#define GREG_SEBSTATE          0x0000UL    /* SEB State Register */

#define GREG_STAT              0x000CUL    /* Status Register */
#define GREG_STAT_TXINTME      0x00000001  /* TX INTME frame transferred */
#define GREG_STAT_TXALL        0x00000002  /* All TX frames transferred */
#define GREG_STAT_TXDONE       0x00000004  /* One TX frame transferred */
#define GREG_STAT_RXDONE       0x00000010  /* One RX frame arrived */
#define GREG_STAT_RXNOBUF      0x00000020  /* No free RX buffers available */
#define GREG_STAT_RXTAGERR     0x00000040  /* RX tag framing is corrupt */
#define GREG_STAT_TXMAC        0x00004000  /* TX MAC signalled interrupt */
#define GREG_STAT_RXMAC        0x00008000  /* RX MAC signalled interrupt */
#define GREG_STAT_MAC          0x00010000  /* MAC Control signalled irq */
#define GREG_STAT_TXNR         0xfff80000  /* == TXDMA_TXDONE reg val */
#define GREG_STAT_TXNR_SHIFT   19

/* These interrupts are edge latches in the status register,
 * reading it (or writing the corresponding bit in IACK) will
 * clear them
 */
#define GREG_STAT_LATCH        (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
                                GREG_STAT_RXDONE | GREG_STAT_RXDONE | \
                                GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR)

#define GREG_IMASK             0x0010UL    /* Interrupt Mask Register */
#define GREG_IACK              0x0014UL    /* Interrupt ACK Register */
#define GREG_STAT2             0x001CUL    /* Alias of GREG_STAT */
#define GREG_PCIESTAT          0x1000UL    /* PCI Error Status Register */
#define GREG_PCIEMASK          0x1004UL    /* PCI Error Mask Register */

#define GREG_SWRST             0x1010UL    /* Software Reset Register */
#define GREG_SWRST_TXRST       0x00000001  /* TX Software Reset */
#define GREG_SWRST_RXRST       0x00000002  /* RX Software Reset */
#define GREG_SWRST_RSTOUT      0x00000004  /* Force RST# pin active */

/* TX DMA Registers */
#define SUNGEM_MMIO_TXDMA_SIZE 0x1000

#define TXDMA_KICK             0x0000UL    /* TX Kick Register */

#define TXDMA_CFG              0x0004UL    /* TX Configuration Register */
#define TXDMA_CFG_ENABLE       0x00000001  /* Enable TX DMA channel */
#define TXDMA_CFG_RINGSZ       0x0000001e  /* TX descriptor ring size */

#define TXDMA_DBLOW            0x0008UL    /* TX Desc. Base Low */
#define TXDMA_DBHI             0x000CUL    /* TX Desc. Base High */
#define TXDMA_PCNT             0x0024UL    /* TX FIFO Packet Counter */
#define TXDMA_SMACHINE         0x0028UL    /* TX State Machine Register */
#define TXDMA_DPLOW            0x0030UL    /* TX Data Pointer Low */
#define TXDMA_DPHI             0x0034UL    /* TX Data Pointer High */
#define TXDMA_TXDONE           0x0100UL    /* TX Completion Register */
#define TXDMA_FTAG             0x0108UL    /* TX FIFO Tag */
#define TXDMA_FSZ              0x0118UL    /* TX FIFO Size */

/* Receive DMA Registers */
#define SUNGEM_MMIO_RXDMA_SIZE 0x2000

#define RXDMA_CFG              0x0000UL    /* RX Configuration Register */
#define RXDMA_CFG_ENABLE       0x00000001  /* Enable RX DMA channel */
#define RXDMA_CFG_RINGSZ       0x0000001e  /* RX descriptor ring size */
#define RXDMA_CFG_FBOFF        0x00001c00  /* Offset of first data byte */
#define RXDMA_CFG_CSUMOFF      0x000fe000  /* Skip bytes before csum calc */

#define RXDMA_DBLOW            0x0004UL    /* RX Descriptor Base Low */
#define RXDMA_DBHI             0x0008UL    /* RX Descriptor Base High */
#define RXDMA_PCNT             0x0018UL    /* RX FIFO Packet Counter */
#define RXDMA_SMACHINE         0x001CUL    /* RX State Machine Register */
#define RXDMA_PTHRESH          0x0020UL    /* Pause Thresholds */
#define RXDMA_DPLOW            0x0024UL    /* RX Data Pointer Low */
#define RXDMA_DPHI             0x0028UL    /* RX Data Pointer High */
#define RXDMA_KICK             0x0100UL    /* RX Kick Register */
#define RXDMA_DONE             0x0104UL    /* RX Completion Register */
#define RXDMA_BLANK            0x0108UL    /* RX Blanking Register */
#define RXDMA_FTAG             0x0110UL    /* RX FIFO Tag */
#define RXDMA_FSZ              0x0120UL    /* RX FIFO Size */

/* MAC Registers */
#define SUNGEM_MMIO_MAC_SIZE   0x200

#define MAC_TXRST              0x0000UL    /* TX MAC Software Reset Command */
#define MAC_RXRST              0x0004UL    /* RX MAC Software Reset Command */
#define MAC_TXSTAT             0x0010UL    /* TX MAC Status Register */
#define MAC_RXSTAT             0x0014UL    /* RX MAC Status Register */

#define MAC_CSTAT              0x0018UL    /* MAC Control Status Register */
#define MAC_CSTAT_PTR          0xffff0000  /* Pause Time Received */

#define MAC_TXMASK             0x0020UL    /* TX MAC Mask Register */
#define MAC_RXMASK             0x0024UL    /* RX MAC Mask Register */
#define MAC_MCMASK             0x0028UL    /* MAC Control Mask Register */

#define MAC_TXCFG              0x0030UL    /* TX MAC Configuration Register */
#define MAC_TXCFG_ENAB         0x00000001  /* TX MAC Enable */

#define MAC_RXCFG              0x0034UL    /* RX MAC Configuration Register */
#define MAC_RXCFG_ENAB         0x00000001  /* RX MAC Enable */
#define MAC_RXCFG_SFCS         0x00000004  /* Strip FCS */
#define MAC_RXCFG_PROM         0x00000008  /* Promiscuous Mode */
#define MAC_RXCFG_PGRP         0x00000010  /* Promiscuous Group */
#define MAC_RXCFG_HFE          0x00000020  /* Hash Filter Enable */

#define MAC_XIFCFG             0x003CUL    /* XIF Configuration Register */
#define MAC_XIFCFG_LBCK        0x00000002  /* Loopback TX to RX */

#define MAC_MINFSZ             0x0050UL    /* MinFrameSize Register */
#define MAC_MAXFSZ             0x0054UL    /* MaxFrameSize Register */
#define MAC_ADDR0              0x0080UL    /* MAC Address 0 Register */
#define MAC_ADDR1              0x0084UL    /* MAC Address 1 Register */
#define MAC_ADDR2              0x0088UL    /* MAC Address 2 Register */
#define MAC_ADDR3              0x008CUL    /* MAC Address 3 Register */
#define MAC_ADDR4              0x0090UL    /* MAC Address 4 Register */
#define MAC_ADDR5              0x0094UL    /* MAC Address 5 Register */
#define MAC_HASH0              0x00C0UL    /* Hash Table 0 Register */
#define MAC_PATMPS             0x0114UL    /* Peak Attempts Register */
#define MAC_SMACHINE           0x0134UL    /* State Machine Register */

/* MIF Registers */
#define SUNGEM_MMIO_MIF_SIZE   0x20

#define MIF_FRAME              0x000CUL    /* MIF Frame/Output Register */
#define MIF_FRAME_OP           0x30000000  /* OPcode */
#define MIF_FRAME_PHYAD        0x0f800000  /* PHY ADdress */
#define MIF_FRAME_REGAD        0x007c0000  /* REGister ADdress */
#define MIF_FRAME_TALSB        0x00010000  /* Turn Around LSB */
#define MIF_FRAME_DATA         0x0000ffff  /* Instruction Payload */

#define MIF_CFG                0x0010UL    /* MIF Configuration Register */
#define MIF_CFG_MDI0           0x00000100  /* MDIO_0 present or read-bit */
#define MIF_CFG_MDI1           0x00000200  /* MDIO_1 present or read-bit */

#define MIF_STATUS             0x0018UL    /* MIF Status Register */
#define MIF_SMACHINE           0x001CUL    /* MIF State Machine Register */

/* PCS/Serialink Registers */
#define SUNGEM_MMIO_PCS_SIZE   0x60
#define PCS_MIISTAT            0x0004UL    /* PCS MII Status Register */
#define PCS_ISTAT              0x0018UL    /* PCS Interrupt Status Reg */
#define PCS_SSTATE             0x005CUL    /* Serialink State Register */

/* Descriptors */
struct gem_txd {
    uint64_t control_word;
    uint64_t buffer;
};

#define TXDCTRL_BUFSZ     0x0000000000007fffULL   /* Buffer Size */
#define TXDCTRL_CSTART    0x00000000001f8000ULL   /* CSUM Start Offset */
#define TXDCTRL_COFF      0x000000001fe00000ULL   /* CSUM Stuff Offset */
#define TXDCTRL_CENAB     0x0000000020000000ULL   /* CSUM Enable */
#define TXDCTRL_EOF       0x0000000040000000ULL   /* End of Frame */
#define TXDCTRL_SOF       0x0000000080000000ULL   /* Start of Frame */
#define TXDCTRL_INTME     0x0000000100000000ULL   /* "Interrupt Me" */

struct gem_rxd {
    uint64_t status_word;
    uint64_t buffer;
};

#define RXDCTRL_HPASS     0x1000000000000000ULL   /* Passed Hash Filter */
#define RXDCTRL_ALTMAC    0x2000000000000000ULL   /* Matched ALT MAC */

typedef struct {
    PCIDevice pdev;

    MemoryRegion sungem;
    MemoryRegion greg;
    MemoryRegion txdma;
    MemoryRegion rxdma;
    MemoryRegion mac;
    MemoryRegion mif;
    MemoryRegion pcs;
    NICState *nic;
    NICConf conf;
    uint32_t phy_addr;

    uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2];
    uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2];
    uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2];
    uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2];
    uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2];
    uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 2];

    /* Cache some useful things */
    uint32_t rx_mask;
    uint32_t tx_mask;

    /* Current tx packet */
    uint8_t tx_data[MAX_PACKET_SIZE];
    uint32_t tx_size;
    uint64_t tx_first_ctl;
} SunGEMState;

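/* Drive the PCI interrupt line from the global status register: the
 * line is asserted whenever any status bit not covered by the
 * interrupt mask register is set.
 */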
static void sungem_eval_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->gregs[GREG_IMASK >> 2];
    stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
    if (stat & ~mask) {
        pci_set_irq(PCI_DEVICE(s), 1);
    } else {
        pci_set_irq(PCI_DEVICE(s), 0);
    }
}

static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
{
    uint32_t stat;

    stat = s->gregs[GREG_STAT >> 2];
    if (val) {
        stat |= bits;
    } else {
        stat &= ~bits;
    }
    s->gregs[GREG_STAT >> 2] = stat;
    sungem_eval_irq(s);
}

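/* Re-evaluate the secondary (MAC-level) interrupt sources and fold
 * them into the global TXMAC/RXMAC/MAC status bits.
 */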
static void sungem_eval_cascade_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->macregs[MAC_TXSTAT >> 2];
    stat = s->macregs[MAC_TXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_TXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_TXMAC, false);
    }

    mask = s->macregs[MAC_RXSTAT >> 2];
    stat = s->macregs[MAC_RXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_RXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_RXMAC, false);
    }

    mask = s->macregs[MAC_CSTAT >> 2];
    stat = s->macregs[MAC_MCMASK >> 2] & ~MAC_CSTAT_PTR;
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_MAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_MAC, false);
    }
}

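/* Checksum offload for the frame being assembled: compute a raw
 * Internet checksum from the CSTART offset to the end of the frame
 * and store it at the COFF offset, both taken from the first
 * descriptor's control word.
 */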
static void sungem_do_tx_csum(SunGEMState *s)
{
    uint16_t start, off;
    uint32_t csum;

    start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15;
    off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21;

    trace_sungem_tx_checksum(start, off);

    if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) {
        trace_sungem_tx_checksum_oob();
        return;
    }

    csum = net_raw_checksum(s->tx_data + start, s->tx_size - start);
    stw_be_p(s->tx_data + off, csum);
}

static void sungem_send_packet(SunGEMState *s, const uint8_t *buf,
                               int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);

    if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

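/* Accumulate one TX descriptor's worth of data into tx_data. A frame
 * may span several descriptors: SOF restarts the staging buffer and
 * EOF triggers checksum insertion and transmission.
 */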
static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t len;

    /* If it's a start of frame, discard anything we had in the
     * buffer and start again. This should be an error condition
     * if we had something ... for now we ignore it
     */
    if (desc->control_word & TXDCTRL_SOF) {
        if (s->tx_first_ctl) {
            trace_sungem_tx_unfinished();
        }
        s->tx_size = 0;
        s->tx_first_ctl = desc->control_word;
    }

    /* Grab data size */
    len = desc->control_word & TXDCTRL_BUFSZ;

    /* Clamp it to our max size */
    if ((s->tx_size + len) > MAX_PACKET_SIZE) {
        trace_sungem_tx_overflow();
        len = MAX_PACKET_SIZE - s->tx_size;
    }

    /* Read the data */
    pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len);
    s->tx_size += len;

    /* If end of frame, send packet */
    if (desc->control_word & TXDCTRL_EOF) {
        trace_sungem_tx_finished(s->tx_size);

        /* Handle csum */
        if (s->tx_first_ctl & TXDCTRL_CENAB) {
            sungem_do_tx_csum(s);
        }

        /* Send it */
        sungem_send_packet(s, s->tx_data, s->tx_size);

        /* No more pending packet */
        s->tx_size = 0;
        s->tx_first_ctl = 0;
    }
}

static void sungem_tx_kick(SunGEMState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t comp, kick;
    uint32_t txdma_cfg, txmac_cfg, ints;
    uint64_t dbase;

    trace_sungem_tx_kick();

    /* Check that both TX MAC and TX DMA are enabled. We don't
     * handle DMA-less direct FIFO operations (we don't emulate
     * the FIFO at all).
     *
     * A write to TXDMA_KICK while DMA isn't enabled can happen
     * when the driver is resetting the pointer.
     */
    txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2];
    txmac_cfg = s->macregs[MAC_TXCFG >> 2];
    if (!(txdma_cfg & TXDMA_CFG_ENABLE) ||
        !(txmac_cfg & MAC_TXCFG_ENAB)) {
        trace_sungem_tx_disabled();
        return;
    }

    /* XXX Test min frame size register ? */
    /* XXX Test max frame size register ? */

    dbase = s->txdmaregs[TXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2];

    comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask;
    kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask;

    trace_sungem_tx_process(comp, kick, s->tx_mask + 1);

    /* This is rather primitive for now, we just send everything we
     * can in one go, like e1000. Ideally we should do the sending
     * from some kind of background task
     */
    while (comp != kick) {
        struct gem_txd desc;

        /* Read the next descriptor */
        pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc));

        /* Byteswap descriptor */
        desc.control_word = le64_to_cpu(desc.control_word);
        desc.buffer = le64_to_cpu(desc.buffer);
        trace_sungem_tx_desc(comp, desc.control_word, desc.buffer);

        /* Send it for processing */
        sungem_process_tx_desc(s, &desc);

        /* Interrupt */
        ints = GREG_STAT_TXDONE;
        if (desc.control_word & TXDCTRL_INTME) {
            ints |= GREG_STAT_TXINTME;
        }
        sungem_update_status(s, ints, true);

        /* Next ! */
        comp = (comp + 1) & s->tx_mask;
        s->txdmaregs[TXDMA_TXDONE >> 2] = comp;
    }

    /* We sent everything, set status/irq bit */
    sungem_update_status(s, GREG_STAT_TXALL, true);
}

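/* The RX ring is exhausted when advancing the completion (done)
 * pointer by one slot would catch up with the driver's kick pointer.
 */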
static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done)
{
    return kick == ((done + 1) & s->rx_mask);
}

static bool sungem_can_receive(NetClientState *nc)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    uint32_t kick, done, rxdma_cfg, rxmac_cfg;
    bool full;

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];

    /* If MAC disabled, can't receive */
    if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) {
        trace_sungem_rx_mac_disabled();
        return false;
    }
    if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) {
        trace_sungem_rx_txdma_disabled();
        return false;
    }

    /* Check RX availability */
    kick = s->rxdmaregs[RXDMA_KICK >> 2];
    done = s->rxdmaregs[RXDMA_DONE >> 2];
    full = sungem_rx_full(s, kick, done);

    trace_sungem_rx_check(!full, kick, done);

    return !full;
}

enum {
    rx_no_match,
    rx_match_promisc,
    rx_match_bcast,
    rx_match_allmcast,
    rx_match_mcast,
    rx_match_mac,
    rx_match_altmac,
};

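/* Destination MAC filtering: accept in order of promiscuous mode,
 * broadcast, multicast (group-promiscuous or hash filter), the
 * primary MAC address and the alternate MAC address.
 */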
static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc)
{
    uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2];
    uint32_t mac0, mac1, mac2;

    /* Promisc enabled ? */
    if (rxcfg & MAC_RXCFG_PROM) {
        return rx_match_promisc;
    }

    /* Format MAC address into dwords */
    mac0 = (mac[4] << 8) | mac[5];
    mac1 = (mac[2] << 8) | mac[3];
    mac2 = (mac[0] << 8) | mac[1];

    trace_sungem_rx_mac_check(mac0, mac1, mac2);

    /* Is this a broadcast frame ? */
    if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) {
        return rx_match_bcast;
    }

    /* TODO: Implement address filter registers (or we don't care ?) */

    /* Is this a multicast frame ? */
    if (mac[0] & 1) {
        trace_sungem_rx_mac_multicast();

        /* Promisc group enabled ? */
        if (rxcfg & MAC_RXCFG_PGRP) {
            return rx_match_allmcast;
        }

        /* TODO: Check MAC control frames (or we don't care) ? */

        /* Check hash filter (somebody check that's correct ?) */
        if (rxcfg & MAC_RXCFG_HFE) {
            uint32_t hash, idx;

            crc >>= 24;
            idx = (crc >> 2) & 0x3c;
            hash = s->macregs[(MAC_HASH0 + idx) >> 2];
            if (hash & (1 << (15 - (crc & 0xf)))) {
                return rx_match_mcast;
            }
        }
        return rx_no_match;
    }

    /* Main MAC check */
    trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2],
                                s->macregs[MAC_ADDR1 >> 2],
                                s->macregs[MAC_ADDR2 >> 2]);

    if (mac0 == s->macregs[MAC_ADDR0 >> 2] &&
        mac1 == s->macregs[MAC_ADDR1 >> 2] &&
        mac2 == s->macregs[MAC_ADDR2 >> 2]) {
        return rx_match_mac;
    }

    /* Alt MAC check */
    if (mac0 == s->macregs[MAC_ADDR3 >> 2] &&
        mac1 == s->macregs[MAC_ADDR4 >> 2] &&
        mac2 == s->macregs[MAC_ADDR5 >> 2]) {
        return rx_match_altmac;
    }

    return rx_no_match;
}

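/* RX path: filter the frame, pad runts to the 60-byte minimum, DMA
 * the payload into the buffer of the next free RX descriptor, then
 * write back the descriptor status word and raise the RX interrupts.
 */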
static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t mac_crc, done, kick, max_fsize;
    uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff;
    uint8_t smallbuf[60];
    struct gem_rxd desc;
    uint64_t dbase, baddr;
    unsigned int rx_cond;

    trace_sungem_rx_packet(size);

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
    max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff;

    /* If MAC or DMA disabled, can't receive */
    if (!(rxdma_cfg & RXDMA_CFG_ENABLE) ||
        !(rxmac_cfg & MAC_RXCFG_ENAB)) {
        trace_sungem_rx_disabled();
        return 0;
    }

    /* Size adjustment for FCS */
    if (rxmac_cfg & MAC_RXCFG_SFCS) {
        fcs_size = 0;
    } else {
        fcs_size = 4;
    }

    /* Discard frame smaller than a MAC or larger than max frame size
     * (when accounting for FCS)
     */
    if (size < 6 || (size + 4) > max_fsize) {
        trace_sungem_rx_bad_frame_size(size);
        /* XXX Increment error statistics ? */
        return size;
    }

    /* We don't drop too small frames since we get them in qemu, we pad
     * them instead. We should probably use the min frame size register
     * but I don't want to use a variable size staging buffer and I
     * know both MacOS and Linux use the default 64 anyway. We use 60
     * here to account for the non-existent FCS.
     */
    if (size < 60) {
        memcpy(smallbuf, buf, size);
        memset(&smallbuf[size], 0, 60 - size);
        buf = smallbuf;
        size = 60;
    }

    /* Get MAC crc */
    mac_crc = net_crc32_le(buf, ETH_ALEN);

    /* Packet isn't for me ? */
    rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
    if (rx_cond == rx_no_match) {
        /* Just drop it */
        trace_sungem_rx_unmatched();
        return size;
    }

    /* Get ring pointers */
    kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask;
    done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask;

    trace_sungem_rx_process(done, kick, s->rx_mask + 1);

    /* Ring full ? Can't receive */
    if (sungem_rx_full(s, kick, done)) {
        trace_sungem_rx_ringfull();
        return 0;
    }

    /* Note: The real GEM will fetch descriptors in blocks of 4,
     * for now we handle them one at a time, I think the driver will
     * cope
     */

    dbase = s->rxdmaregs[RXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2];

    /* Read the next descriptor */
    pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    trace_sungem_rx_desc(le64_to_cpu(desc.status_word),
                         le64_to_cpu(desc.buffer));

    /* Effective buffer address */
    baddr = le64_to_cpu(desc.buffer) & ~7ull;
    baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10;

    /* Write buffer out */
    pci_dma_write(d, baddr, buf, size);

    if (fcs_size) {
        /* Should we add an FCS ? Linux doesn't ask us to strip it,
         * however I believe nothing checks it... For now we just
         * do nothing. It's faster this way.
         */
    }

    /* Calculate the checksum */
    coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13;
    csum = net_raw_checksum((uint8_t *)buf + coff, size - coff);

    /* Build the updated descriptor */
    desc.status_word = (size + fcs_size) << 16;
    desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44;
    desc.status_word |= csum;
    if (rx_cond == rx_match_mcast) {
        desc.status_word |= RXDCTRL_HPASS;
    }
    if (rx_cond == rx_match_altmac) {
        desc.status_word |= RXDCTRL_ALTMAC;
    }
    desc.status_word = cpu_to_le64(desc.status_word);

    pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    done = (done + 1) & s->rx_mask;
    s->rxdmaregs[RXDMA_DONE >> 2] = done;

    /* XXX Unconditionally set RX interrupt for now. The interrupt
     * mitigation timer might well end up adding more overhead than
     * helping here...
     */
    ints = GREG_STAT_RXDONE;
    if (sungem_rx_full(s, kick, done)) {
        ints |= GREG_STAT_RXNOBUF;
    }
    sungem_update_status(s, ints, true);

    return size;
}

static void sungem_set_link_status(NetClientState *nc)
{
    /* We don't do anything for now as I believe none of the OSes
     * drivers use the MIF autopoll feature nor the PHY interrupt
     */
}

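/* The RINGSZ fields encode the number of ring descriptors as
 * 32 << code; cache the corresponding index wrap masks.
 */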
static void sungem_update_masks(SunGEMState *s)
{
    uint32_t sz;

    sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5);
    s->rx_mask = sz - 1;

    sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5);
    s->tx_mask = sz - 1;
}

static void sungem_reset_rx(SunGEMState *s)
{
    trace_sungem_rx_reset();

    /* XXX Do RXCFG */
    /* XXX Check value */
    s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140;
    s->rxdmaregs[RXDMA_DONE >> 2] = 0;
    s->rxdmaregs[RXDMA_KICK >> 2] = 0;
    s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010;
    s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8;
    s->rxdmaregs[RXDMA_BLANK >> 2] = 0;

    sungem_update_masks(s);
}

static void sungem_reset_tx(SunGEMState *s)
{
    trace_sungem_tx_reset();

    /* XXX Do TXCFG */
    /* XXX Check value */
    s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
    s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
    s->txdmaregs[TXDMA_KICK >> 2] = 0;
    s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;

    sungem_update_masks(s);

    s->tx_size = 0;
    s->tx_first_ctl = 0;
}

static void sungem_reset_all(SunGEMState *s, bool pci_reset)
{
    trace_sungem_reset(pci_reset);

    sungem_reset_rx(s);
    sungem_reset_tx(s);

    s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF;
    s->gregs[GREG_STAT >> 2] = 0;
    if (pci_reset) {
        uint8_t *ma = s->conf.macaddr.a;

        s->gregs[GREG_SWRST >> 2] = 0;
        s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5];
        s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3];
        s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1];
    } else {
        s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT;
    }
    s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0;
}

static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr,
                             uint8_t reg_addr, uint16_t val)
{
    trace_sungem_mii_write(phy_addr, reg_addr, val);

    /* XXX TODO */
}

static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                  uint8_t reg_addr)
{
    if (phy_addr != s->phy_addr) {
        return 0xffff;
    }

    /* Primitive emulation of a BCM5201 to please the driver,
     * ID is 0x00406210. TODO: Do a gigabit PHY like BCM5400
     */
    switch (reg_addr) {
    case MII_BMCR:
        return 0;
    case MII_PHYID1:
        return 0x0040;
    case MII_PHYID2:
        return 0x6210;
    case MII_BMSR:
        if (qemu_get_queue(s->nic)->link_down) {
            return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
        } else {
            return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
                   MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        }
    case MII_ANLPAR:
    case MII_ANAR:
        return MII_ANLPAR_TXFD;
    case 0x18: /* 5201 AUX status */
        return 3; /* 100FD */
    default:
        return 0;
    }
}

static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                uint8_t reg_addr)
{
    uint16_t val;

    val = __sungem_mii_read(s, phy_addr, reg_addr);

    trace_sungem_mii_read(phy_addr, reg_addr, val);

    return val;
}

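/* Execute one MIF management frame: decode the opcode, PHY address
 * and register address fields, perform the read or write, and return
 * the value to present in MIF_FRAME (with the turnaround LSB set to
 * signal completion).
 */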
static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val)
{
    uint8_t phy_addr, reg_addr, op;

    /* Ignore not start of frame */
    if ((val >> 30) != 1) {
        trace_sungem_mii_invalid_sof(val >> 30);
        return 0xffff;
    }
    phy_addr = (val & MIF_FRAME_PHYAD) >> 23;
    reg_addr = (val & MIF_FRAME_REGAD) >> 18;
    op = (val & MIF_FRAME_OP) >> 28;
    switch (op) {
    case 1:
        sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA);
        return val | MIF_FRAME_TALSB;
    case 2:
        return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB;
    default:
        trace_sungem_mii_invalid_op(op);
    }
    return 0xffff | MIF_FRAME_TALSB;
}

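/* The MMIO handlers below share the same structure: reject offsets
 * outside the implemented range, apply a pre-write filter for
 * read-only or side-effect registers, latch the value, then perform
 * any post-write action.
 */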
static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
                                   unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_greg_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case GREG_SEBSTATE:
    case GREG_STAT:
    case GREG_STAT2:
    case GREG_PCIESTAT:
        return; /* No actual write */
    case GREG_IACK:
        val &= GREG_STAT_LATCH;
        s->gregs[GREG_STAT >> 2] &= ~val;
        sungem_eval_irq(s);
        return; /* No actual write */
    case GREG_PCIEMASK:
        val &= 0x7;
        break;
    }

    s->gregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case GREG_IMASK:
        /* Re-evaluate interrupt */
        sungem_eval_irq(s);
        break;
    case GREG_SWRST:
        switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
        case GREG_SWRST_RXRST:
            sungem_reset_rx(s);
            break;
        case GREG_SWRST_TXRST:
            sungem_reset_tx(s);
            break;
        case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
            sungem_reset_all(s, false);
        }
        break;
    }
}

static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->gregs[addr >> 2];

    trace_sungem_mmio_greg_read(addr, val);

    switch (addr) {
    case GREG_STAT:
        /* Side effect, clear bottom 7 bits */
        s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH;
        sungem_eval_irq(s);

        /* Inject TX completion in returned value */
        val = (val & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    case GREG_STAT2:
        /* Return the status reg without side effect
         * (and inject TX completion in returned value)
         */
        val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_greg_ops = {
    .read = sungem_mmio_greg_read,
    .write = sungem_mmio_greg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_txdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case TXDMA_TXDONE:
    case TXDMA_PCNT:
    case TXDMA_SMACHINE:
    case TXDMA_DPLOW:
    case TXDMA_DPHI:
    case TXDMA_FSZ:
    case TXDMA_FTAG:
        return; /* No actual write */
    }

    s->txdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case TXDMA_KICK:
        sungem_tx_kick(s);
        break;
    case TXDMA_CFG:
        sungem_update_masks(s);
        break;
    }
}

static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->txdmaregs[addr >> 2];

    trace_sungem_mmio_txdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_txdma_ops = {
    .read = sungem_mmio_txdma_read,
    .write = sungem_mmio_txdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_rxdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case RXDMA_DONE:
    case RXDMA_PCNT:
    case RXDMA_SMACHINE:
    case RXDMA_DPLOW:
    case RXDMA_DPHI:
    case RXDMA_FSZ:
    case RXDMA_FTAG:
        return; /* No actual write */
    }

    s->rxdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case RXDMA_KICK:
        trace_sungem_rx_kick(val);
        break;
    case RXDMA_CFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->rxdmaregs[addr >> 2];

    trace_sungem_mmio_rxdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_rxdma_ops = {
    .read = sungem_mmio_rxdma_read,
    .write = sungem_mmio_rxdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mac_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MAC_TXRST: /* Not technically read-only but will do for now */
    case MAC_RXRST: /* Not technically read-only but will do for now */
    case MAC_TXSTAT:
    case MAC_RXSTAT:
    case MAC_CSTAT:
    case MAC_PATMPS:
    case MAC_SMACHINE:
        return; /* No actual write */
    case MAC_MINFSZ:
        /* 10-bits implemented */
        val &= 0x3ff;
        break;
    }

    s->macregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MAC_TXMASK:
    case MAC_RXMASK:
    case MAC_MCMASK:
        sungem_eval_cascade_irq(s);
        break;
    case MAC_RXCFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->macregs[addr >> 2];

    trace_sungem_mmio_mac_read(addr, val);

    switch (addr) {
    case MAC_TXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_TXMAC, false);
        break;
    case MAC_RXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_RXMAC, false);
        break;
    case MAC_CSTAT:
        /* Side effect, interrupt bits */
        s->macregs[addr >> 2] &= MAC_CSTAT_PTR;
        sungem_update_status(s, GREG_STAT_MAC, false);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_mac_ops = {
    .read = sungem_mmio_mac_read,
    .write = sungem_mmio_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mif_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MIF_STATUS:
    case MIF_SMACHINE:
        return; /* No actual write */
    case MIF_CFG:
        /* Maintain the RO MDI bits to advertize an MDIO PHY on MDI0 */
        val &= ~MIF_CFG_MDI1;
        val |= MIF_CFG_MDI0;
        break;
    }

    s->mifregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MIF_FRAME:
        s->mifregs[addr >> 2] = sungem_mii_op(s, val);
        break;
    }
}

static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->mifregs[addr >> 2];

    trace_sungem_mmio_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_mif_ops = {
    .read = sungem_mmio_mif_read,
    .write = sungem_mmio_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_pcs_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case PCS_MIISTAT:
    case PCS_ISTAT:
    case PCS_SSTATE:
        return; /* No actual write */
    }

    s->pcsregs[addr >> 2] = val;
}

static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->pcsregs[addr >> 2];

    trace_sungem_mmio_pcs_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_pcs_ops = {
    .read = sungem_mmio_pcs_read,
    .write = sungem_mmio_pcs_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_uninit(PCIDevice *dev)
{
    SunGEMState *s = SUNGEM(dev);

    qemu_del_nic(s->nic);
}

static NetClientInfo net_sungem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sungem_can_receive,
    .receive = sungem_receive,
    .link_status_changed = sungem_set_link_status,
};

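/* BAR 0 is a single 2MB region with the register blocks mapped as
 * subregions: GREG at 0x0, TXDMA at 0x2000, RXDMA at 0x4000, MAC at
 * 0x6000, MIF at 0x6200 and PCS at 0x9000.
 */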
static void sungem_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    SunGEMState *s = SUNGEM(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;

    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK |
                 PCI_STATUS_DEVSEL_MEDIUM |
                 PCI_STATUS_66MHZ);

    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
    pci_conf[PCI_MIN_GNT] = 0x40;
    pci_conf[PCI_MAX_LAT] = 0x40;

    sungem_reset_all(s, true);
    memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE);

    memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s,
                          "sungem.greg", SUNGEM_MMIO_GREG_SIZE);
    memory_region_add_subregion(&s->sungem, 0, &s->greg);

    memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s,
                          "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma);

    memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s,
                          "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma);

    memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s,
                          "sungem.mac", SUNGEM_MMIO_MAC_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6000, &s->mac);

    memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s,
                          "sungem.mif", SUNGEM_MMIO_MIF_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6200, &s->mif);

    memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s,
                          "sungem.pcs", SUNGEM_MMIO_PCS_SIZE);
    memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sungem_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic),
                             s->conf.macaddr.a);
}

static void sungem_reset(DeviceState *dev)
{
    SunGEMState *s = SUNGEM(dev);

    sungem_reset_all(s, true);
}

static void sungem_instance_init(Object *obj)
{
    SunGEMState *s = SUNGEM(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static Property sungem_properties[] = {
    DEFINE_NIC_PROPERTIES(SunGEMState, conf),
    /* Phy address should be 0 for most Apple machines except
     * for K2 in which case it's 1. Will be set by a machine
     * override.
     */
    DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_sungem = {
    .name = "sungem",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, SunGEMState),
        VMSTATE_MACADDR(conf.macaddr, SunGEMState),
        VMSTATE_UINT32(phy_addr, SunGEMState),
        VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState,
                             (SUNGEM_MMIO_TXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState,
                             (SUNGEM_MMIO_RXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)),
        VMSTATE_UINT32(rx_mask, SunGEMState),
        VMSTATE_UINT32(tx_mask, SunGEMState),
        VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE),
        VMSTATE_UINT32(tx_size, SunGEMState),
        VMSTATE_UINT64(tx_first_ctl, SunGEMState),
        VMSTATE_END_OF_LIST()
    }
};

static void sungem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sungem_realize;
    k->exit = sungem_uninit;
    k->vendor_id = PCI_VENDOR_ID_APPLE;
    k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC;
    k->revision = 0x01;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_sungem;
    dc->reset = sungem_reset;
    device_class_set_props(dc, sungem_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sungem_info = {
    .name          = TYPE_SUNGEM,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(SunGEMState),
    .class_init    = sungem_class_init,
    .instance_init = sungem_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sungem_register_types(void)
{
    type_register_static(&sungem_info);
}

type_init(sungem_register_types)