/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#define HME_REG_SIZE 0x8000

#define HME_SEB_REG_SIZE 0x2000

#define HME_SEBI_RESET 0x0
#define HME_SEB_RESET_ETX 0x1
#define HME_SEB_RESET_ERX 0x2

#define HME_SEBI_STAT 0x100
#define HME_SEBI_STAT_LINUXBUG 0x108
#define HME_SEB_STAT_RXTOHOST 0x10000
#define HME_SEB_STAT_MIFIRQ 0x800000
#define HME_SEB_STAT_HOSTTOTX 0x1000000
#define HME_SEB_STAT_TXALL 0x2000000

#define HME_SEBI_IMASK 0x104
#define HME_SEBI_IMASK_LINUXBUG 0x10c

#define HME_ETX_REG_SIZE 0x2000

#define HME_ETXI_PENDING 0x0

#define HME_ETXI_RING 0x8
#define HME_ETXI_RING_ADDR 0xffffff00
#define HME_ETXI_RING_OFFSET 0xff

#define HME_ETXI_RSIZE 0x2c

#define HME_ERX_REG_SIZE 0x2000

#define HME_ERXI_CFG 0x0
#define HME_ERX_CFG_RINGSIZE 0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT 9
#define HME_ERX_CFG_BYTEOFFSET 0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
#define HME_ERX_CFG_CSUMSTART 0x7f0000
#define HME_ERX_CFG_CSUMSHIFT 16

#define HME_ERXI_RING 0x4
#define HME_ERXI_RING_ADDR 0xffffff00
#define HME_ERXI_RING_OFFSET 0xff

#define HME_MAC_REG_SIZE 0x1000

#define HME_MACI_TXCFG 0x20c
#define HME_MAC_TXCFG_ENABLE 0x1

#define HME_MACI_RXCFG 0x30c
#define HME_MAC_RXCFG_ENABLE 0x1
#define HME_MAC_RXCFG_PMISC 0x40
#define HME_MAC_RXCFG_HENABLE 0x800

#define HME_MACI_MACADDR2 0x318
#define HME_MACI_MACADDR1 0x31c
#define HME_MACI_MACADDR0 0x320

#define HME_MACI_HASHTAB3 0x340
#define HME_MACI_HASHTAB2 0x344
#define HME_MACI_HASHTAB1 0x348
#define HME_MACI_HASHTAB0 0x34c

#define HME_MIF_REG_SIZE 0x20

#define HME_MIFI_FO 0xc
#define HME_MIF_FO_ST 0xc0000000
#define HME_MIF_FO_ST_SHIFT 30
#define HME_MIF_FO_OPC 0x30000000
#define HME_MIF_FO_OPC_SHIFT 28
#define HME_MIF_FO_PHYAD 0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT 23
#define HME_MIF_FO_REGAD 0x007c0000
#define HME_MIF_FO_REGAD_SHIFT 18
#define HME_MIF_FO_TAMSB 0x20000
#define HME_MIF_FO_TALSB 0x10000
#define HME_MIF_FO_DATA 0xffff

#define HME_MIFI_CFG 0x10
#define HME_MIF_CFG_MDI0 0x100
#define HME_MIF_CFG_MDI1 0x200

#define HME_MIFI_IMASK 0x14

#define HME_MIFI_STAT 0x18

/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL 1
#define HME_PHYAD_EXTERNAL 0

#define MII_COMMAND_START 0x1
#define MII_COMMAND_READ 0x2
#define MII_COMMAND_WRITE 0x1

#define TYPE_SUNHME "sunhme"
#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE 0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE 0x8

#define HME_XD_OWN 0x80000000
#define HME_XD_OFL 0x40000000
#define HME_XD_SOP 0x40000000
#define HME_XD_EOP 0x20000000
#define HME_XD_RXLENMSK 0x3fff0000
#define HME_XD_RXLENSHIFT 16
#define HME_XD_RXCKSUM 0xffff
#define HME_XD_TXLENMSK 0x00001fff
#define HME_XD_TXCKSUM 0x10000000
#define HME_XD_TXCSSTUFF 0xff00000
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART 0xfc000
#define HME_XD_TXCSSTARTSHIFT 14

#define HME_MII_REGS_SIZE 0x20
typedef struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    uint16_t miiregs[HME_MII_REGS_SIZE];
} SunHMEState;
static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};
static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}
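/*
 * Recompute the PCI interrupt line: the SEB status is filtered through the
 * SEB interrupt mask (with MIFIRQ excluded), MIFIRQ is then set whenever any
 * unmasked MIF status bit is pending, and the line is asserted if any bit
 * remains set.
 */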
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    pci_set_irq(d, level);
}
static void sunhme_seb_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}
static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static void sunhme_transmit(SunHMEState *s);

static void sunhme_etx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static void sunhme_erx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static void sunhme_mac_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;
}

static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
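/*
 * Minimal internal PHY model: a BMCR reset self-clears and leaves
 * autonegotiation enabled, and an autonegotiation restart completes
 * immediately, reporting a 100Mbps full-duplex link partner while the
 * link is up.
 */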
static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}
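/*
 * MII management accesses are issued through the MIF frame/output register:
 * the ST/OPC/PHYAD/REGAD/DATA fields select the operation, only the internal
 * PHY address is serviced, and completion is signalled by setting the
 * turnaround LSB (HME_MIF_FO_TALSB) in the value the driver reads back.
 */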
static void sunhme_mif_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}
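/*
 * Walk the TX descriptor ring while descriptors are owned by the device:
 * each fragment is copied into xmit_buffer, a partial checksum is
 * accumulated from the descriptor's checksum start offset when requested,
 * the result is stuffed into the frame at EOP, and the frame is sent if
 * the MAC transmitter is enabled before ownership is returned to the host.
 */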
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}
static int sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE;
}
static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}
static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                     >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}
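/*
 * Incoming frames are filtered against the station MAC address, the
 * broadcast address and (if enabled) the 64-bit multicast hash filter,
 * padded to the minimum Ethernet length, DMAed into the next RX
 * descriptor's buffer, and the length and checksum are written back into
 * the descriptor status word before HME_SEB_STAT_RXTOHOST is raised.
 */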
#define MIN_BUF_SIZE 60

static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return -1;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
            (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
            (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
            (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                    (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return 0;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return 0;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If too small buffer, then expand it */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}
static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};
static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}
static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise autonegotiation, 100Mbps full duplex */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}
static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    dc->props = sunhme_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name = TYPE_SUNHME,
    .parent = TYPE_PCI_DEVICE,
    .class_init = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)