hw/net/sungem.c
1 /*
2 * QEMU model of SUN GEM ethernet controller
4 * As found in Apple ASICs among others
6 * Copyright 2016 Ben Herrenschmidt
7 * Copyright 2017 Mark Cave-Ayland
8 */
10 #include "qemu/osdep.h"
11 #include "hw/pci/pci.h"
12 #include "hw/qdev-properties.h"
13 #include "migration/vmstate.h"
14 #include "qemu/log.h"
15 #include "qemu/module.h"
16 #include "net/net.h"
17 #include "net/eth.h"
18 #include "net/checksum.h"
19 #include "hw/net/mii.h"
20 #include "sysemu/sysemu.h"
21 #include "trace.h"
22 #include "qom/object.h"
24 #define TYPE_SUNGEM "sungem"
26 OBJECT_DECLARE_SIMPLE_TYPE(SunGEMState, SUNGEM)
28 #define MAX_PACKET_SIZE 9016
30 #define SUNGEM_MMIO_SIZE 0x200000
32 /* Global registers */
33 #define SUNGEM_MMIO_GREG_SIZE 0x2000
35 #define GREG_SEBSTATE 0x0000UL /* SEB State Register */
37 #define GREG_STAT 0x000CUL /* Status Register */
38 #define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
39 #define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
40 #define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
41 #define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */
42 #define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */
43 #define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */
44 #define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
45 #define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */
46 #define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */
47 #define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */
48 #define GREG_STAT_TXNR_SHIFT 19
50 /* These interrupts are edge latches in the status register,
51 * reading it (or writing the corresponding bit in IACK) will
52 * clear them
53 */
54 #define GREG_STAT_LATCH (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
55 GREG_STAT_TXDONE | GREG_STAT_RXDONE | \
56 GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR)
58 #define GREG_IMASK 0x0010UL /* Interrupt Mask Register */
59 #define GREG_IACK 0x0014UL /* Interrupt ACK Register */
60 #define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */
61 #define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */
62 #define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */
64 #define GREG_SWRST 0x1010UL /* Software Reset Register */
65 #define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
66 #define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */
67 #define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */
69 /* TX DMA Registers */
70 #define SUNGEM_MMIO_TXDMA_SIZE 0x1000
72 #define TXDMA_KICK 0x0000UL /* TX Kick Register */
74 #define TXDMA_CFG 0x0004UL /* TX Configuration Register */
75 #define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
76 #define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */
78 #define TXDMA_DBLOW 0x0008UL /* TX Desc. Base Low */
79 #define TXDMA_DBHI 0x000CUL /* TX Desc. Base High */
80 #define TXDMA_PCNT 0x0024UL /* TX FIFO Packet Counter */
81 #define TXDMA_SMACHINE 0x0028UL /* TX State Machine Register */
82 #define TXDMA_DPLOW 0x0030UL /* TX Data Pointer Low */
83 #define TXDMA_DPHI 0x0034UL /* TX Data Pointer High */
84 #define TXDMA_TXDONE 0x0100UL /* TX Completion Register */
85 #define TXDMA_FTAG 0x0108UL /* TX FIFO Tag */
86 #define TXDMA_FSZ 0x0118UL /* TX FIFO Size */
88 /* Receive DMA Registers */
89 #define SUNGEM_MMIO_RXDMA_SIZE 0x2000
91 #define RXDMA_CFG 0x0000UL /* RX Configuration Register */
92 #define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */
93 #define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */
94 #define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */
95 #define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */
97 #define RXDMA_DBLOW 0x0004UL /* RX Descriptor Base Low */
98 #define RXDMA_DBHI 0x0008UL /* RX Descriptor Base High */
99 #define RXDMA_PCNT 0x0018UL /* RX FIFO Packet Counter */
100 #define RXDMA_SMACHINE 0x001CUL /* RX State Machine Register */
101 #define RXDMA_PTHRESH 0x0020UL /* Pause Thresholds */
102 #define RXDMA_DPLOW 0x0024UL /* RX Data Pointer Low */
103 #define RXDMA_DPHI 0x0028UL /* RX Data Pointer High */
104 #define RXDMA_KICK 0x0100UL /* RX Kick Register */
105 #define RXDMA_DONE 0x0104UL /* RX Completion Register */
106 #define RXDMA_BLANK 0x0108UL /* RX Blanking Register */
107 #define RXDMA_FTAG 0x0110UL /* RX FIFO Tag */
108 #define RXDMA_FSZ 0x0120UL /* RX FIFO Size */
110 /* MAC Registers */
111 #define SUNGEM_MMIO_MAC_SIZE 0x200
113 #define MAC_TXRST 0x0000UL /* TX MAC Software Reset Command */
114 #define MAC_RXRST 0x0004UL /* RX MAC Software Reset Command */
115 #define MAC_TXSTAT 0x0010UL /* TX MAC Status Register */
116 #define MAC_RXSTAT 0x0014UL /* RX MAC Status Register */
118 #define MAC_CSTAT 0x0018UL /* MAC Control Status Register */
119 #define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */
121 #define MAC_TXMASK 0x0020UL /* TX MAC Mask Register */
122 #define MAC_RXMASK 0x0024UL /* RX MAC Mask Register */
123 #define MAC_MCMASK 0x0028UL /* MAC Control Mask Register */
125 #define MAC_TXCFG 0x0030UL /* TX MAC Configuration Register */
126 #define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */
128 #define MAC_RXCFG 0x0034UL /* RX MAC Configuration Register */
129 #define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */
130 #define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */
131 #define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */
132 #define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */
133 #define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */
135 #define MAC_XIFCFG 0x003CUL /* XIF Configuration Register */
136 #define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */
138 #define MAC_MINFSZ 0x0050UL /* MinFrameSize Register */
139 #define MAC_MAXFSZ 0x0054UL /* MaxFrameSize Register */
140 #define MAC_ADDR0 0x0080UL /* MAC Address 0 Register */
141 #define MAC_ADDR1 0x0084UL /* MAC Address 1 Register */
142 #define MAC_ADDR2 0x0088UL /* MAC Address 2 Register */
143 #define MAC_ADDR3 0x008CUL /* MAC Address 3 Register */
144 #define MAC_ADDR4 0x0090UL /* MAC Address 4 Register */
145 #define MAC_ADDR5 0x0094UL /* MAC Address 5 Register */
146 #define MAC_HASH0 0x00C0UL /* Hash Table 0 Register */
147 #define MAC_PATMPS 0x0114UL /* Peak Attempts Register */
148 #define MAC_SMACHINE 0x0134UL /* State Machine Register */
150 /* MIF Registers */
151 #define SUNGEM_MMIO_MIF_SIZE 0x20
153 #define MIF_FRAME 0x000CUL /* MIF Frame/Output Register */
154 #define MIF_FRAME_OP 0x30000000 /* OPcode */
155 #define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
156 #define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
157 #define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
158 #define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */
160 #define MIF_CFG 0x0010UL /* MIF Configuration Register */
161 #define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
162 #define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */
164 #define MIF_STATUS 0x0018UL /* MIF Status Register */
165 #define MIF_SMACHINE 0x001CUL /* MIF State Machine Register */
167 /* PCS/Serialink Registers */
168 #define SUNGEM_MMIO_PCS_SIZE 0x60
169 #define PCS_MIISTAT 0x0004UL /* PCS MII Status Register */
170 #define PCS_ISTAT 0x0018UL /* PCS Interrupt Status Reg */
171 #define PCS_SSTATE 0x005CUL /* Serialink State Register */
173 /* Descriptors */
174 struct gem_txd {
175 uint64_t control_word;
176 uint64_t buffer;
177 };
179 #define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */
180 #define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */
181 #define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */
182 #define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */
183 #define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */
184 #define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */
185 #define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */
187 struct gem_rxd {
188 uint64_t status_word;
189 uint64_t buffer;
190 };
192 #define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */
193 #define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */
196 struct SunGEMState {
197 PCIDevice pdev;
199 MemoryRegion sungem;
200 MemoryRegion greg;
201 MemoryRegion txdma;
202 MemoryRegion rxdma;
203 MemoryRegion mac;
204 MemoryRegion mif;
205 MemoryRegion pcs;
206 NICState *nic;
207 NICConf conf;
208 uint32_t phy_addr;
210 uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2];
211 uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2];
212 uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2];
213 uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2];
214 uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2];
215 uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 2];
217 /* Cache some useful things */
218 uint32_t rx_mask;
219 uint32_t tx_mask;
221 /* Current tx packet */
222 uint8_t tx_data[MAX_PACKET_SIZE];
223 uint32_t tx_size;
224 uint64_t tx_first_ctl;
225 };
228 static void sungem_eval_irq(SunGEMState *s)
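/* The PCI interrupt level simply follows (STAT & ~IMASK); the TX
 * completion number held in the top bits of STAT is informational
 * only, so it is masked out before the comparison.
 */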
230 uint32_t stat, mask;
232 mask = s->gregs[GREG_IMASK >> 2];
233 stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
234 if (stat & ~mask) {
235 pci_set_irq(PCI_DEVICE(s), 1);
236 } else {
237 pci_set_irq(PCI_DEVICE(s), 0);
241 static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
243 uint32_t stat;
245 stat = s->gregs[GREG_STAT >> 2];
246 if (val) {
247 stat |= bits;
248 } else {
249 stat &= ~bits;
251 s->gregs[GREG_STAT >> 2] = stat;
252 sungem_eval_irq(s);
255 static void sungem_eval_cascade_irq(SunGEMState *s)
257 uint32_t stat, mask;
259 mask = s->macregs[MAC_TXSTAT >> 2];
260 stat = s->macregs[MAC_TXMASK >> 2];
261 if (stat & ~mask) {
262 sungem_update_status(s, GREG_STAT_TXMAC, true);
263 } else {
264 sungem_update_status(s, GREG_STAT_TXMAC, false);
267 mask = s->macregs[MAC_RXSTAT >> 2];
268 stat = s->macregs[MAC_RXMASK >> 2];
269 if (stat & ~mask) {
270 sungem_update_status(s, GREG_STAT_RXMAC, true);
271 } else {
272 sungem_update_status(s, GREG_STAT_RXMAC, false);
275 mask = s->macregs[MAC_CSTAT >> 2];
276 stat = s->macregs[MAC_MCMASK >> 2] & ~MAC_CSTAT_PTR;
277 if (stat & ~mask) {
278 sungem_update_status(s, GREG_STAT_MAC, true);
279 } else {
280 sungem_update_status(s, GREG_STAT_MAC, false);
284 static void sungem_do_tx_csum(SunGEMState *s)
286 uint16_t start, off;
287 uint32_t csum;
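/* The SOF descriptor carries the checksum parameters: CSTART is the
 * offset at which summing starts, COFF (the "stuff" offset) is where
 * the 16-bit result is written back into the frame in network byte
 * order.
 */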
289 start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15;
290 off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21;
292 trace_sungem_tx_checksum(start, off);
294 if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) {
295 trace_sungem_tx_checksum_oob();
296 return;
299 csum = net_raw_checksum(s->tx_data + start, s->tx_size - start);
300 stw_be_p(s->tx_data + off, csum);
303 static void sungem_send_packet(SunGEMState *s, const uint8_t *buf,
304 int size)
306 NetClientState *nc = qemu_get_queue(s->nic);
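/* With XIF loopback enabled, hand the frame straight back to our own
 * receive path instead of sending it out through the backend.
 */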
308 if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
309 qemu_receive_packet(nc, buf, size);
310 } else {
311 qemu_send_packet(nc, buf, size);
315 static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc)
317 PCIDevice *d = PCI_DEVICE(s);
318 uint32_t len;
320 /* If it's a start of frame, discard anything we had in the
321 * buffer and start again. This should be an error condition
322 * if we had something ... for now we ignore it
323 */
324 if (desc->control_word & TXDCTRL_SOF) {
325 if (s->tx_first_ctl) {
326 trace_sungem_tx_unfinished();
328 s->tx_size = 0;
329 s->tx_first_ctl = desc->control_word;
332 /* Grab data size */
333 len = desc->control_word & TXDCTRL_BUFSZ;
335 /* Clamp it to our max size */
336 if ((s->tx_size + len) > MAX_PACKET_SIZE) {
337 trace_sungem_tx_overflow();
338 len = MAX_PACKET_SIZE - s->tx_size;
341 /* Read the data */
342 pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len);
343 s->tx_size += len;
345 /* If end of frame, send packet */
346 if (desc->control_word & TXDCTRL_EOF) {
347 trace_sungem_tx_finished(s->tx_size);
349 /* Handle csum */
350 if (s->tx_first_ctl & TXDCTRL_CENAB) {
351 sungem_do_tx_csum(s);
354 /* Send it */
355 sungem_send_packet(s, s->tx_data, s->tx_size);
357 /* No more pending packet */
358 s->tx_size = 0;
359 s->tx_first_ctl = 0;
363 static void sungem_tx_kick(SunGEMState *s)
365 PCIDevice *d = PCI_DEVICE(s);
366 uint32_t comp, kick;
367 uint32_t txdma_cfg, txmac_cfg, ints;
368 uint64_t dbase;
370 trace_sungem_tx_kick();
372 /* Check that both TX MAC and TX DMA are enabled. We don't
373 * handle DMA-less direct FIFO operations (we don't emulate
374 * the FIFO at all).
376 * A write to TXDMA_KICK while DMA isn't enabled can happen
377 * when the driver is resetting the pointer.
378 */
379 txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2];
380 txmac_cfg = s->macregs[MAC_TXCFG >> 2];
381 if (!(txdma_cfg & TXDMA_CFG_ENABLE) ||
382 !(txmac_cfg & MAC_TXCFG_ENAB)) {
383 trace_sungem_tx_disabled();
384 return;
387 /* XXX Test min frame size register ? */
388 /* XXX Test max frame size register ? */
390 dbase = s->txdmaregs[TXDMA_DBHI >> 2];
391 dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2];
393 comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask;
394 kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask;
396 trace_sungem_tx_process(comp, kick, s->tx_mask + 1);
398 /* This is rather primitive for now, we just send everything we
399 * can in one go, like e1000. Ideally we should do the sending
400 * from some kind of background task
401 */
402 while (comp != kick) {
403 struct gem_txd desc;
405 /* Read the next descriptor */
406 pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc));
408 /* Byteswap descriptor */
409 desc.control_word = le64_to_cpu(desc.control_word);
410 desc.buffer = le64_to_cpu(desc.buffer);
411 trace_sungem_tx_desc(comp, desc.control_word, desc.buffer);
413 /* Send it for processing */
414 sungem_process_tx_desc(s, &desc);
416 /* Interrupt */
417 ints = GREG_STAT_TXDONE;
418 if (desc.control_word & TXDCTRL_INTME) {
419 ints |= GREG_STAT_TXINTME;
421 sungem_update_status(s, ints, true);
423 /* Next ! */
424 comp = (comp + 1) & s->tx_mask;
425 s->txdmaregs[TXDMA_TXDONE >> 2] = comp;
428 /* We sent everything, set status/irq bit */
429 sungem_update_status(s, GREG_STAT_TXALL, true);
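/* The RX ring is treated as full when advancing the completion pointer
 * by one more slot would catch up with the driver's kick pointer, i.e.
 * one descriptor is always left unused.
 */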
432 static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done)
434 return kick == ((done + 1) & s->rx_mask);
437 static bool sungem_can_receive(NetClientState *nc)
439 SunGEMState *s = qemu_get_nic_opaque(nc);
440 uint32_t kick, done, rxdma_cfg, rxmac_cfg;
441 bool full;
443 rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
444 rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
446 /* If MAC disabled, can't receive */
447 if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) {
448 trace_sungem_rx_mac_disabled();
449 return false;
451 if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) {
452 trace_sungem_rx_txdma_disabled();
453 return false;
456 /* Check RX availability */
457 kick = s->rxdmaregs[RXDMA_KICK >> 2];
458 done = s->rxdmaregs[RXDMA_DONE >> 2];
459 full = sungem_rx_full(s, kick, done);
461 trace_sungem_rx_check(!full, kick, done);
463 return !full;
466 enum {
467 rx_no_match,
468 rx_match_promisc,
469 rx_match_bcast,
470 rx_match_allmcast,
471 rx_match_mcast,
472 rx_match_mac,
473 rx_match_altmac,
474 };
476 static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc)
478 uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2];
479 uint32_t mac0, mac1, mac2;
481 /* Promisc enabled ? */
482 if (rxcfg & MAC_RXCFG_PROM) {
483 return rx_match_promisc;
486 /* Format MAC address into dwords */
487 mac0 = (mac[4] << 8) | mac[5];
488 mac1 = (mac[2] << 8) | mac[3];
489 mac2 = (mac[0] << 8) | mac[1];
491 trace_sungem_rx_mac_check(mac0, mac1, mac2);
493 /* Is this a broadcast frame ? */
494 if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) {
495 return rx_match_bcast;
498 /* TODO: Implement address filter registers (or we don't care ?) */
500 /* Is this a multicast frame ? */
501 if (mac[0] & 1) {
502 trace_sungem_rx_mac_multicast();
504 /* Promisc group enabled ? */
505 if (rxcfg & MAC_RXCFG_PGRP) {
506 return rx_match_allmcast;
509 /* TODO: Check MAC control frames (or we don't care) ? */
511 /* Check hash filter (somebody check that's correct ?) */
512 if (rxcfg & MAC_RXCFG_HFE) {
513 uint32_t hash, idx;
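/* The top byte of the little-endian CRC of the destination MAC picks
 * one of 256 hash-table bits: bits 7:4 select one of the 16 16-bit
 * HASH registers, bits 3:0 select the bit (counted from the MSB)
 * within that register.
 */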
515 crc >>= 24;
516 idx = (crc >> 2) & 0x3c;
517 hash = s->macregs[(MAC_HASH0 + idx) >> 2];
518 if (hash & (1 << (15 - (crc & 0xf)))) {
519 return rx_match_mcast;
522 return rx_no_match;
525 /* Main MAC check */
526 trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2],
527 s->macregs[MAC_ADDR1 >> 2],
528 s->macregs[MAC_ADDR2 >> 2]);
530 if (mac0 == s->macregs[MAC_ADDR0 >> 2] &&
531 mac1 == s->macregs[MAC_ADDR1 >> 2] &&
532 mac2 == s->macregs[MAC_ADDR2 >> 2]) {
533 return rx_match_mac;
536 /* Alt MAC check */
537 if (mac0 == s->macregs[MAC_ADDR3 >> 2] &&
538 mac1 == s->macregs[MAC_ADDR4 >> 2] &&
539 mac2 == s->macregs[MAC_ADDR5 >> 2]) {
540 return rx_match_altmac;
543 return rx_no_match;
546 static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
547 size_t size)
549 SunGEMState *s = qemu_get_nic_opaque(nc);
550 PCIDevice *d = PCI_DEVICE(s);
551 uint32_t mac_crc, done, kick, max_fsize;
552 uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff;
553 uint8_t smallbuf[60];
554 struct gem_rxd desc;
555 uint64_t dbase, baddr;
556 unsigned int rx_cond;
558 trace_sungem_rx_packet(size);
560 rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
561 rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
562 max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff;
564 /* If MAC or DMA disabled, can't receive */
565 if (!(rxdma_cfg & RXDMA_CFG_ENABLE) ||
566 !(rxmac_cfg & MAC_RXCFG_ENAB)) {
567 trace_sungem_rx_disabled();
568 return 0;
571 /* Size adjustment for FCS */
572 if (rxmac_cfg & MAC_RXCFG_SFCS) {
573 fcs_size = 0;
574 } else {
575 fcs_size = 4;
578 /* Discard frame smaller than a MAC or larger than max frame size
579 * (when accounting for FCS)
580 */
581 if (size < 6 || (size + 4) > max_fsize) {
582 trace_sungem_rx_bad_frame_size(size);
583 /* XXX Increment error statistics ? */
584 return size;
587 /* We don't drop too-small frames, since we get them from qemu anyway; we pad
588 * them instead. We should probably use the min frame size register
589 * but I don't want to use a variable size staging buffer and I
590 * know both MacOS and Linux use the default 64 anyway. We use 60
591 * here to account for the non-existent FCS.
592 */
593 if (size < 60) {
594 memcpy(smallbuf, buf, size);
595 memset(&smallbuf[size], 0, 60 - size);
596 buf = smallbuf;
597 size = 60;
600 /* Get MAC crc */
601 mac_crc = net_crc32_le(buf, ETH_ALEN);
603 /* Packet isn't for me ? */
604 rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
605 if (rx_cond == rx_no_match) {
606 /* Just drop it */
607 trace_sungem_rx_unmatched();
608 return size;
611 /* Get ring pointers */
612 kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask;
613 done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask;
615 trace_sungem_rx_process(done, kick, s->rx_mask + 1);
617 /* Ring full ? Can't receive */
618 if (sungem_rx_full(s, kick, done)) {
619 trace_sungem_rx_ringfull();
620 return 0;
623 /* Note: The real GEM will fetch descriptors in blocks of 4,
624 * for now we handle them one at a time, I think the driver will
625 * cope
626 */
628 dbase = s->rxdmaregs[RXDMA_DBHI >> 2];
629 dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2];
631 /* Read the next descriptor */
632 pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc));
634 trace_sungem_rx_desc(le64_to_cpu(desc.status_word),
635 le64_to_cpu(desc.buffer));
637 /* Effective buffer address */
638 baddr = le64_to_cpu(desc.buffer) & ~7ull;
639 baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10;
641 /* Write buffer out */
642 pci_dma_write(d, baddr, buf, size);
644 if (fcs_size) {
645 /* Should we add an FCS ? Linux doesn't ask us to strip it,
646 * however I believe nothing checks it... For now we just
647 * do nothing. It's faster this way.
648 */
649 }
651 /* Calculate the checksum */
652 coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13;
653 csum = net_raw_checksum((uint8_t *)buf + coff, size - coff);
655 /* Build the updated descriptor */
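/* (Per the shifts below: the checksum goes in the low 16 bits, the
 * frame size including any FCS at bit 16, the top half of the
 * destination-MAC CRC at bit 44, plus the hash / alternate-MAC
 * match flags.)
 */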
656 desc.status_word = (size + fcs_size) << 16;
657 desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44;
658 desc.status_word |= csum;
659 if (rx_cond == rx_match_mcast) {
660 desc.status_word |= RXDCTRL_HPASS;
662 if (rx_cond == rx_match_altmac) {
663 desc.status_word |= RXDCTRL_ALTMAC;
665 desc.status_word = cpu_to_le64(desc.status_word);
667 pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc));
669 done = (done + 1) & s->rx_mask;
670 s->rxdmaregs[RXDMA_DONE >> 2] = done;
672 /* XXX Unconditionally set RX interrupt for now. The interrupt
673 * mitigation timer might well end up adding more overhead than
674 * helping here...
675 */
676 ints = GREG_STAT_RXDONE;
677 if (sungem_rx_full(s, kick, done)) {
678 ints |= GREG_STAT_RXNOBUF;
680 sungem_update_status(s, ints, true);
682 return size;
685 static void sungem_set_link_status(NetClientState *nc)
687 /* We don't do anything for now as I believe none of the OS
688 * drivers use the MIF autopoll feature nor the PHY interrupt
689 */
692 static void sungem_update_masks(SunGEMState *s)
694 uint32_t sz;
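/* The RINGSZ fields encode the number of descriptors as 32 << value,
 * so the index masks computed below end up as (32 << value) - 1.
 */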
696 sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5);
697 s->rx_mask = sz - 1;
699 sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5);
700 s->tx_mask = sz - 1;
703 static void sungem_reset_rx(SunGEMState *s)
705 trace_sungem_rx_reset();
707 /* XXX Do RXCFG */
708 /* XXX Check value */
709 s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140;
710 s->rxdmaregs[RXDMA_DONE >> 2] = 0;
711 s->rxdmaregs[RXDMA_KICK >> 2] = 0;
712 s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010;
713 s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8;
714 s->rxdmaregs[RXDMA_BLANK >> 2] = 0;
716 sungem_update_masks(s);
719 static void sungem_reset_tx(SunGEMState *s)
721 trace_sungem_tx_reset();
723 /* XXX Do TXCFG */
724 /* XXX Check value */
725 s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
726 s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
727 s->txdmaregs[TXDMA_KICK >> 2] = 0;
728 s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;
730 sungem_update_masks(s);
732 s->tx_size = 0;
733 s->tx_first_ctl = 0;
736 static void sungem_reset_all(SunGEMState *s, bool pci_reset)
738 trace_sungem_reset(pci_reset);
740 sungem_reset_rx(s);
741 sungem_reset_tx(s);
743 s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF;
744 s->gregs[GREG_STAT >> 2] = 0;
745 if (pci_reset) {
746 uint8_t *ma = s->conf.macaddr.a;
748 s->gregs[GREG_SWRST >> 2] = 0;
749 s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5];
750 s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3];
751 s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1];
752 } else {
753 s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT;
755 s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0;
758 static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr,
759 uint8_t reg_addr, uint16_t val)
761 trace_sungem_mii_write(phy_addr, reg_addr, val);
763 /* XXX TODO */
766 static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
767 uint8_t reg_addr)
769 if (phy_addr != s->phy_addr) {
770 return 0xffff;
772 /* Primitive emulation of a BCM5201 to please the driver,
773 * ID is 0x00406210. TODO: Do a gigabit PHY like BCM5400
774 */
775 switch (reg_addr) {
776 case MII_BMCR:
777 return 0;
778 case MII_PHYID1:
779 return 0x0040;
780 case MII_PHYID2:
781 return 0x6210;
782 case MII_BMSR:
783 if (qemu_get_queue(s->nic)->link_down) {
784 return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
785 } else {
786 return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
787 MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
789 case MII_ANLPAR:
790 case MII_ANAR:
791 return MII_ANLPAR_TXFD;
792 case 0x18: /* 5201 AUX status */
793 return 3; /* 100FD */
794 default:
795 return 0;
798 static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
799 uint8_t reg_addr)
801 uint16_t val;
803 val = __sungem_mii_read(s, phy_addr, reg_addr);
805 trace_sungem_mii_read(phy_addr, reg_addr, val);
807 return val;
810 static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val)
812 uint8_t phy_addr, reg_addr, op;
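/* MIF frames follow the clause-22 MDIO format: bits 31:30 are the
 * start-of-frame (01), bits 29:28 the opcode (01 write, 10 read),
 * followed by the PHY and register addresses; the turnaround LSB is
 * set in the result so the driver can poll for completion.
 */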
814 /* Ignore not start of frame */
815 if ((val >> 30) != 1) {
816 trace_sungem_mii_invalid_sof(val >> 30);
817 return 0xffff;
819 phy_addr = (val & MIF_FRAME_PHYAD) >> 23;
820 reg_addr = (val & MIF_FRAME_REGAD) >> 18;
821 op = (val & MIF_FRAME_OP) >> 28;
822 switch (op) {
823 case 1:
824 sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA);
825 return val | MIF_FRAME_TALSB;
826 case 2:
827 return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB;
828 default:
829 trace_sungem_mii_invalid_op(op);
831 return 0xffff | MIF_FRAME_TALSB;
834 static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
835 unsigned size)
837 SunGEMState *s = opaque;
839 if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
840 qemu_log_mask(LOG_GUEST_ERROR,
841 "Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
842 addr);
843 return;
846 trace_sungem_mmio_greg_write(addr, val);
848 /* Pre-write filter */
849 switch (addr) {
850 /* Read only registers */
851 case GREG_SEBSTATE:
852 case GREG_STAT:
853 case GREG_STAT2:
854 case GREG_PCIESTAT:
855 return; /* No actual write */
856 case GREG_IACK:
857 val &= GREG_STAT_LATCH;
858 s->gregs[GREG_STAT >> 2] &= ~val;
859 sungem_eval_irq(s);
860 return; /* No actual write */
861 case GREG_PCIEMASK:
862 val &= 0x7;
863 break;
866 s->gregs[addr >> 2] = val;
868 /* Post write action */
869 switch (addr) {
870 case GREG_IMASK:
871 /* Re-evaluate interrupt */
872 sungem_eval_irq(s);
873 break;
874 case GREG_SWRST:
875 switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
876 case GREG_SWRST_RXRST:
877 sungem_reset_rx(s);
878 break;
879 case GREG_SWRST_TXRST:
880 sungem_reset_tx(s);
881 break;
882 case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
883 sungem_reset_all(s, false);
885 break;
889 static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size)
891 SunGEMState *s = opaque;
892 uint32_t val;
894 if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
895 qemu_log_mask(LOG_GUEST_ERROR,
896 "Read from unknown GREG register 0x%"HWADDR_PRIx"\n",
897 addr);
898 return 0;
901 val = s->gregs[addr >> 2];
903 trace_sungem_mmio_greg_read(addr, val);
905 switch (addr) {
906 case GREG_STAT:
907 /* Side effect, clear bottom 7 bits */
908 s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH;
909 sungem_eval_irq(s);
911 /* Inject TX completion in returned value */
912 val = (val & ~GREG_STAT_TXNR) |
913 (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
914 break;
915 case GREG_STAT2:
916 /* Return the status reg without side effect
917 * (and inject TX completion in returned value)
918 */
919 val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) |
920 (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
921 break;
924 return val;
927 static const MemoryRegionOps sungem_mmio_greg_ops = {
928 .read = sungem_mmio_greg_read,
929 .write = sungem_mmio_greg_write,
930 .endianness = DEVICE_LITTLE_ENDIAN,
931 .impl = {
932 .min_access_size = 4,
933 .max_access_size = 4,
934 },
935 };
937 static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val,
938 unsigned size)
940 SunGEMState *s = opaque;
942 if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
943 qemu_log_mask(LOG_GUEST_ERROR,
944 "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n",
945 addr);
946 return;
949 trace_sungem_mmio_txdma_write(addr, val);
951 /* Pre-write filter */
952 switch (addr) {
953 /* Read only registers */
954 case TXDMA_TXDONE:
955 case TXDMA_PCNT:
956 case TXDMA_SMACHINE:
957 case TXDMA_DPLOW:
958 case TXDMA_DPHI:
959 case TXDMA_FSZ:
960 case TXDMA_FTAG:
961 return; /* No actual write */
964 s->txdmaregs[addr >> 2] = val;
966 /* Post write action */
967 switch (addr) {
968 case TXDMA_KICK:
969 sungem_tx_kick(s);
970 break;
971 case TXDMA_CFG:
972 sungem_update_masks(s);
973 break;
977 static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
979 SunGEMState *s = opaque;
980 uint32_t val;
982 if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
983 qemu_log_mask(LOG_GUEST_ERROR,
984 "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
985 addr);
986 return 0;
989 val = s->txdmaregs[addr >> 2];
991 trace_sungem_mmio_txdma_read(addr, val);
993 return val;
996 static const MemoryRegionOps sungem_mmio_txdma_ops = {
997 .read = sungem_mmio_txdma_read,
998 .write = sungem_mmio_txdma_write,
999 .endianness = DEVICE_LITTLE_ENDIAN,
1000 .impl = {
1001 .min_access_size = 4,
1002 .max_access_size = 4,
1003 },
1004 };
1006 static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val,
1007 unsigned size)
1009 SunGEMState *s = opaque;
1011 if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
1012 qemu_log_mask(LOG_GUEST_ERROR,
1013 "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n",
1014 addr);
1015 return;
1018 trace_sungem_mmio_rxdma_write(addr, val);
1020 /* Pre-write filter */
1021 switch (addr) {
1022 /* Read only registers */
1023 case RXDMA_DONE:
1024 case RXDMA_PCNT:
1025 case RXDMA_SMACHINE:
1026 case RXDMA_DPLOW:
1027 case RXDMA_DPHI:
1028 case RXDMA_FSZ:
1029 case RXDMA_FTAG:
1030 return; /* No actual write */
1033 s->rxdmaregs[addr >> 2] = val;
1035 /* Post write action */
1036 switch (addr) {
1037 case RXDMA_KICK:
1038 trace_sungem_rx_kick(val);
1039 break;
1040 case RXDMA_CFG:
1041 sungem_update_masks(s);
1042 if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
1043 (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
1044 qemu_flush_queued_packets(qemu_get_queue(s->nic));
1046 break;
1050 static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size)
1052 SunGEMState *s = opaque;
1053 uint32_t val;
1055 if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
1056 qemu_log_mask(LOG_GUEST_ERROR,
1057 "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n",
1058 addr);
1059 return 0;
1062 val = s->rxdmaregs[addr >> 2];
1064 trace_sungem_mmio_rxdma_read(addr, val);
1066 return val;
1069 static const MemoryRegionOps sungem_mmio_rxdma_ops = {
1070 .read = sungem_mmio_rxdma_read,
1071 .write = sungem_mmio_rxdma_write,
1072 .endianness = DEVICE_LITTLE_ENDIAN,
1073 .impl = {
1074 .min_access_size = 4,
1075 .max_access_size = 4,
1076 },
1077 };
1079 static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val,
1080 unsigned size)
1082 SunGEMState *s = opaque;
1084 if (!(addr <= 0x134)) {
1085 qemu_log_mask(LOG_GUEST_ERROR,
1086 "Write to unknown MAC register 0x%"HWADDR_PRIx"\n",
1087 addr);
1088 return;
1091 trace_sungem_mmio_mac_write(addr, val);
1093 /* Pre-write filter */
1094 switch (addr) {
1095 /* Read only registers */
1096 case MAC_TXRST: /* Not technically read-only but will do for now */
1097 case MAC_RXRST: /* Not technically read-only but will do for now */
1098 case MAC_TXSTAT:
1099 case MAC_RXSTAT:
1100 case MAC_CSTAT:
1101 case MAC_PATMPS:
1102 case MAC_SMACHINE:
1103 return; /* No actual write */
1104 case MAC_MINFSZ:
1105 /* 10-bits implemented */
1106 val &= 0x3ff;
1107 break;
1110 s->macregs[addr >> 2] = val;
1112 /* Post write action */
1113 switch (addr) {
1114 case MAC_TXMASK:
1115 case MAC_RXMASK:
1116 case MAC_MCMASK:
1117 sungem_eval_cascade_irq(s);
1118 break;
1119 case MAC_RXCFG:
1120 sungem_update_masks(s);
1121 if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
1122 (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
1123 qemu_flush_queued_packets(qemu_get_queue(s->nic));
1125 break;
1129 static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size)
1131 SunGEMState *s = opaque;
1132 uint32_t val;
1134 if (!(addr <= 0x134)) {
1135 qemu_log_mask(LOG_GUEST_ERROR,
1136 "Read from unknown MAC register 0x%"HWADDR_PRIx"\n",
1137 addr);
1138 return 0;
1141 val = s->macregs[addr >> 2];
1143 trace_sungem_mmio_mac_read(addr, val);
1145 switch (addr) {
1146 case MAC_TXSTAT:
1147 /* Side effect, clear all */
1148 s->macregs[addr >> 2] = 0;
1149 sungem_update_status(s, GREG_STAT_TXMAC, false);
1150 break;
1151 case MAC_RXSTAT:
1152 /* Side effect, clear all */
1153 s->macregs[addr >> 2] = 0;
1154 sungem_update_status(s, GREG_STAT_RXMAC, false);
1155 break;
1156 case MAC_CSTAT:
1157 /* Side effect, interrupt bits */
1158 s->macregs[addr >> 2] &= MAC_CSTAT_PTR;
1159 sungem_update_status(s, GREG_STAT_MAC, false);
1160 break;
1163 return val;
1166 static const MemoryRegionOps sungem_mmio_mac_ops = {
1167 .read = sungem_mmio_mac_read,
1168 .write = sungem_mmio_mac_write,
1169 .endianness = DEVICE_LITTLE_ENDIAN,
1170 .impl = {
1171 .min_access_size = 4,
1172 .max_access_size = 4,
1173 },
1174 };
1176 static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
1177 unsigned size)
1179 SunGEMState *s = opaque;
1181 if (!(addr <= 0x1c)) {
1182 qemu_log_mask(LOG_GUEST_ERROR,
1183 "Write to unknown MIF register 0x%"HWADDR_PRIx"\n",
1184 addr);
1185 return;
1188 trace_sungem_mmio_mif_write(addr, val);
1190 /* Pre-write filter */
1191 switch (addr) {
1192 /* Read only registers */
1193 case MIF_STATUS:
1194 case MIF_SMACHINE:
1195 return; /* No actual write */
1196 case MIF_CFG:
1197 /* Maintain the RO MDI bits to advertise an MDIO PHY on MDI0 */
1198 val &= ~MIF_CFG_MDI1;
1199 val |= MIF_CFG_MDI0;
1200 break;
1203 s->mifregs[addr >> 2] = val;
1205 /* Post write action */
1206 switch (addr) {
1207 case MIF_FRAME:
1208 s->mifregs[addr >> 2] = sungem_mii_op(s, val);
1209 break;
1213 static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size)
1215 SunGEMState *s = opaque;
1216 uint32_t val;
1218 if (!(addr <= 0x1c)) {
1219 qemu_log_mask(LOG_GUEST_ERROR,
1220 "Read from unknown MIF register 0x%"HWADDR_PRIx"\n",
1221 addr);
1222 return 0;
1225 val = s->mifregs[addr >> 2];
1227 trace_sungem_mmio_mif_read(addr, val);
1229 return val;
1232 static const MemoryRegionOps sungem_mmio_mif_ops = {
1233 .read = sungem_mmio_mif_read,
1234 .write = sungem_mmio_mif_write,
1235 .endianness = DEVICE_LITTLE_ENDIAN,
1236 .impl = {
1237 .min_access_size = 4,
1238 .max_access_size = 4,
1239 },
1240 };
1242 static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val,
1243 unsigned size)
1245 SunGEMState *s = opaque;
1247 if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
1248 qemu_log_mask(LOG_GUEST_ERROR,
1249 "Write to unknown PCS register 0x%"HWADDR_PRIx"\n",
1250 addr);
1251 return;
1254 trace_sungem_mmio_pcs_write(addr, val);
1256 /* Pre-write filter */
1257 switch (addr) {
1258 /* Read only registers */
1259 case PCS_MIISTAT:
1260 case PCS_ISTAT:
1261 case PCS_SSTATE:
1262 return; /* No actual write */
1265 s->pcsregs[addr >> 2] = val;
1268 static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size)
1270 SunGEMState *s = opaque;
1271 uint32_t val;
1273 if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
1274 qemu_log_mask(LOG_GUEST_ERROR,
1275 "Read from unknown PCS register 0x%"HWADDR_PRIx"\n",
1276 addr);
1277 return 0;
1280 val = s->pcsregs[addr >> 2];
1282 trace_sungem_mmio_pcs_read(addr, val);
1284 return val;
1287 static const MemoryRegionOps sungem_mmio_pcs_ops = {
1288 .read = sungem_mmio_pcs_read,
1289 .write = sungem_mmio_pcs_write,
1290 .endianness = DEVICE_LITTLE_ENDIAN,
1291 .impl = {
1292 .min_access_size = 4,
1293 .max_access_size = 4,
1294 },
1295 };
1297 static void sungem_uninit(PCIDevice *dev)
1299 SunGEMState *s = SUNGEM(dev);
1301 qemu_del_nic(s->nic);
1304 static NetClientInfo net_sungem_info = {
1305 .type = NET_CLIENT_DRIVER_NIC,
1306 .size = sizeof(NICState),
1307 .can_receive = sungem_can_receive,
1308 .receive = sungem_receive,
1309 .link_status_changed = sungem_set_link_status,
1310 };
1312 static void sungem_realize(PCIDevice *pci_dev, Error **errp)
1314 DeviceState *dev = DEVICE(pci_dev);
1315 SunGEMState *s = SUNGEM(pci_dev);
1316 uint8_t *pci_conf;
1318 pci_conf = pci_dev->config;
1320 pci_set_word(pci_conf + PCI_STATUS,
1321 PCI_STATUS_FAST_BACK |
1322 PCI_STATUS_DEVSEL_MEDIUM |
1323 PCI_STATUS_66MHZ);
1325 pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
1326 pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);
1328 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1329 pci_conf[PCI_MIN_GNT] = 0x40;
1330 pci_conf[PCI_MAX_LAT] = 0x40;
1332 sungem_reset_all(s, true);
1333 memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE);
1335 memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s,
1336 "sungem.greg", SUNGEM_MMIO_GREG_SIZE);
1337 memory_region_add_subregion(&s->sungem, 0, &s->greg);
1339 memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s,
1340 "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE);
1341 memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma);
1343 memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s,
1344 "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE);
1345 memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma);
1347 memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s,
1348 "sungem.mac", SUNGEM_MMIO_MAC_SIZE);
1349 memory_region_add_subregion(&s->sungem, 0x6000, &s->mac);
1351 memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s,
1352 "sungem.mif", SUNGEM_MMIO_MIF_SIZE);
1353 memory_region_add_subregion(&s->sungem, 0x6200, &s->mif);
1355 memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s,
1356 "sungem.pcs", SUNGEM_MMIO_PCS_SIZE);
1357 memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs);
1359 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem);
1361 qemu_macaddr_default_if_unset(&s->conf.macaddr);
1362 s->nic = qemu_new_nic(&net_sungem_info, &s->conf,
1363 object_get_typename(OBJECT(dev)),
1364 dev->id, s);
1365 qemu_format_nic_info_str(qemu_get_queue(s->nic),
1366 s->conf.macaddr.a);
1369 static void sungem_reset(DeviceState *dev)
1371 SunGEMState *s = SUNGEM(dev);
1373 sungem_reset_all(s, true);
1376 static void sungem_instance_init(Object *obj)
1378 SunGEMState *s = SUNGEM(obj);
1380 device_add_bootindex_property(obj, &s->conf.bootindex,
1381 "bootindex", "/ethernet-phy@0",
1382 DEVICE(obj));
1385 static Property sungem_properties[] = {
1386 DEFINE_NIC_PROPERTIES(SunGEMState, conf),
1387 /* Phy address should be 0 for most Apple machines except
1388 * for K2 in which case it's 1. Will be set by a machine
1389 * override.
1390 */
1391 DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
1392 DEFINE_PROP_END_OF_LIST(),
1393 };
1395 static const VMStateDescription vmstate_sungem = {
1396 .name = "sungem",
1397 .version_id = 0,
1398 .minimum_version_id = 0,
1399 .fields = (VMStateField[]) {
1400 VMSTATE_PCI_DEVICE(pdev, SunGEMState),
1401 VMSTATE_MACADDR(conf.macaddr, SunGEMState),
1402 VMSTATE_UINT32(phy_addr, SunGEMState),
1403 VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)),
1404 VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState,
1405 (SUNGEM_MMIO_TXDMA_SIZE >> 2)),
1406 VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState,
1407 (SUNGEM_MMIO_RXDMA_SIZE >> 2)),
1408 VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)),
1409 VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)),
1410 VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)),
1411 VMSTATE_UINT32(rx_mask, SunGEMState),
1412 VMSTATE_UINT32(tx_mask, SunGEMState),
1413 VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE),
1414 VMSTATE_UINT32(tx_size, SunGEMState),
1415 VMSTATE_UINT64(tx_first_ctl, SunGEMState),
1416 VMSTATE_END_OF_LIST()
1417 }
1418 };
1420 static void sungem_class_init(ObjectClass *klass, void *data)
1422 DeviceClass *dc = DEVICE_CLASS(klass);
1423 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1425 k->realize = sungem_realize;
1426 k->exit = sungem_uninit;
1427 k->vendor_id = PCI_VENDOR_ID_APPLE;
1428 k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC;
1429 k->revision = 0x01;
1430 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1431 dc->vmsd = &vmstate_sungem;
1432 dc->reset = sungem_reset;
1433 device_class_set_props(dc, sungem_properties);
1434 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
1437 static const TypeInfo sungem_info = {
1438 .name = TYPE_SUNGEM,
1439 .parent = TYPE_PCI_DEVICE,
1440 .instance_size = sizeof(SunGEMState),
1441 .class_init = sungem_class_init,
1442 .instance_init = sungem_instance_init,
1443 .interfaces = (InterfaceInfo[]) {
1444 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
1445 { },
1446 },
1447 };
1449 static void sungem_register_types(void)
1451 type_register_static(&sungem_info);
1454 type_init(sungem_register_types)