/*
 * Source provenance: hw/net/sunhme.c
 * blob 7364ba10190e0cb4f400839ae165742812a44582
 */
/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"
38 #define HME_REG_SIZE 0x8000
40 #define HME_SEB_REG_SIZE 0x2000
42 #define HME_SEBI_RESET 0x0
43 #define HME_SEB_RESET_ETX 0x1
44 #define HME_SEB_RESET_ERX 0x2
46 #define HME_SEBI_STAT 0x100
47 #define HME_SEBI_STAT_LINUXBUG 0x108
48 #define HME_SEB_STAT_RXTOHOST 0x10000
49 #define HME_SEB_STAT_NORXD 0x20000
50 #define HME_SEB_STAT_MIFIRQ 0x800000
51 #define HME_SEB_STAT_HOSTTOTX 0x1000000
52 #define HME_SEB_STAT_TXALL 0x2000000
54 #define HME_SEBI_IMASK 0x104
55 #define HME_SEBI_IMASK_LINUXBUG 0x10c
57 #define HME_ETX_REG_SIZE 0x2000
59 #define HME_ETXI_PENDING 0x0
61 #define HME_ETXI_RING 0x8
62 #define HME_ETXI_RING_ADDR 0xffffff00
63 #define HME_ETXI_RING_OFFSET 0xff
65 #define HME_ETXI_RSIZE 0x2c
67 #define HME_ERX_REG_SIZE 0x2000
69 #define HME_ERXI_CFG 0x0
70 #define HME_ERX_CFG_RINGSIZE 0x600
71 #define HME_ERX_CFG_RINGSIZE_SHIFT 9
72 #define HME_ERX_CFG_BYTEOFFSET 0x38
73 #define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
74 #define HME_ERX_CFG_CSUMSTART 0x7f0000
75 #define HME_ERX_CFG_CSUMSHIFT 16
77 #define HME_ERXI_RING 0x4
78 #define HME_ERXI_RING_ADDR 0xffffff00
79 #define HME_ERXI_RING_OFFSET 0xff
81 #define HME_MAC_REG_SIZE 0x1000
83 #define HME_MACI_TXCFG 0x20c
84 #define HME_MAC_TXCFG_ENABLE 0x1
86 #define HME_MACI_RXCFG 0x30c
87 #define HME_MAC_RXCFG_ENABLE 0x1
88 #define HME_MAC_RXCFG_PMISC 0x40
89 #define HME_MAC_RXCFG_HENABLE 0x800
91 #define HME_MACI_MACADDR2 0x318
92 #define HME_MACI_MACADDR1 0x31c
93 #define HME_MACI_MACADDR0 0x320
95 #define HME_MACI_HASHTAB3 0x340
96 #define HME_MACI_HASHTAB2 0x344
97 #define HME_MACI_HASHTAB1 0x348
98 #define HME_MACI_HASHTAB0 0x34c
100 #define HME_MIF_REG_SIZE 0x20
102 #define HME_MIFI_FO 0xc
103 #define HME_MIF_FO_ST 0xc0000000
104 #define HME_MIF_FO_ST_SHIFT 30
105 #define HME_MIF_FO_OPC 0x30000000
106 #define HME_MIF_FO_OPC_SHIFT 28
107 #define HME_MIF_FO_PHYAD 0x0f800000
108 #define HME_MIF_FO_PHYAD_SHIFT 23
109 #define HME_MIF_FO_REGAD 0x007c0000
110 #define HME_MIF_FO_REGAD_SHIFT 18
111 #define HME_MIF_FO_TAMSB 0x20000
112 #define HME_MIF_FO_TALSB 0x10000
113 #define HME_MIF_FO_DATA 0xffff
115 #define HME_MIFI_CFG 0x10
116 #define HME_MIF_CFG_MDI0 0x100
117 #define HME_MIF_CFG_MDI1 0x200
119 #define HME_MIFI_IMASK 0x14
121 #define HME_MIFI_STAT 0x18
124 /* Wired HME PHY addresses */
125 #define HME_PHYAD_INTERNAL 1
126 #define HME_PHYAD_EXTERNAL 0
128 #define MII_COMMAND_START 0x1
129 #define MII_COMMAND_READ 0x2
130 #define MII_COMMAND_WRITE 0x1
132 #define TYPE_SUNHME "sunhme"
133 typedef struct SunHMEState SunHMEState;
134 DECLARE_INSTANCE_CHECKER(SunHMEState, SUNHME,
135 TYPE_SUNHME)
137 /* Maximum size of buffer */
138 #define HME_FIFO_SIZE 0x800
140 /* Size of TX/RX descriptor */
141 #define HME_DESC_SIZE 0x8
143 #define HME_XD_OWN 0x80000000
144 #define HME_XD_OFL 0x40000000
145 #define HME_XD_SOP 0x40000000
146 #define HME_XD_EOP 0x20000000
147 #define HME_XD_RXLENMSK 0x3fff0000
148 #define HME_XD_RXLENSHIFT 16
149 #define HME_XD_RXCKSUM 0xffff
150 #define HME_XD_TXLENMSK 0x00001fff
151 #define HME_XD_TXCKSUM 0x10000000
152 #define HME_XD_TXCSSTUFF 0xff00000
153 #define HME_XD_TXCSSTUFFSHIFT 20
154 #define HME_XD_TXCSSTART 0xfc000
155 #define HME_XD_TXCSSTARTSHIFT 14
157 #define HME_MII_REGS_SIZE 0x20
159 struct SunHMEState {
160 /*< private >*/
161 PCIDevice parent_obj;
163 NICState *nic;
164 NICConf conf;
166 MemoryRegion hme;
167 MemoryRegion sebreg;
168 MemoryRegion etxreg;
169 MemoryRegion erxreg;
170 MemoryRegion macreg;
171 MemoryRegion mifreg;
173 uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
174 uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
175 uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
176 uint32_t macregs[HME_MAC_REG_SIZE >> 2];
177 uint32_t mifregs[HME_MIF_REG_SIZE >> 2];
179 uint16_t miiregs[HME_MII_REGS_SIZE];
182 static Property sunhme_properties[] = {
183 DEFINE_NIC_PROPERTIES(SunHMEState, conf),
184 DEFINE_PROP_END_OF_LIST(),
187 static void sunhme_reset_tx(SunHMEState *s)
189 /* Indicate TX reset complete */
190 s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
193 static void sunhme_reset_rx(SunHMEState *s)
195 /* Indicate RX reset complete */
196 s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
199 static void sunhme_update_irq(SunHMEState *s)
201 PCIDevice *d = PCI_DEVICE(s);
202 int level;
204 /* MIF interrupt mask (16-bit) */
205 uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
206 uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;
208 /* Main SEB interrupt mask (include MIF status from above) */
209 uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
210 ~HME_SEB_STAT_MIFIRQ;
211 uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
212 if (mif) {
213 seb |= HME_SEB_STAT_MIFIRQ;
216 level = (seb ? 1 : 0);
217 trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);
219 pci_set_irq(d, level);
222 static void sunhme_seb_write(void *opaque, hwaddr addr,
223 uint64_t val, unsigned size)
225 SunHMEState *s = SUNHME(opaque);
227 trace_sunhme_seb_write(addr, val);
229 /* Handly buggy Linux drivers before 4.13 which have
230 the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
231 switch (addr) {
232 case HME_SEBI_STAT_LINUXBUG:
233 addr = HME_SEBI_STAT;
234 break;
235 case HME_SEBI_IMASK_LINUXBUG:
236 addr = HME_SEBI_IMASK;
237 break;
238 default:
239 break;
242 switch (addr) {
243 case HME_SEBI_RESET:
244 if (val & HME_SEB_RESET_ETX) {
245 sunhme_reset_tx(s);
247 if (val & HME_SEB_RESET_ERX) {
248 sunhme_reset_rx(s);
250 val = s->sebregs[HME_SEBI_RESET >> 2];
251 break;
254 s->sebregs[addr >> 2] = val;
257 static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
258 unsigned size)
260 SunHMEState *s = SUNHME(opaque);
261 uint64_t val;
263 /* Handly buggy Linux drivers before 4.13 which have
264 the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
265 switch (addr) {
266 case HME_SEBI_STAT_LINUXBUG:
267 addr = HME_SEBI_STAT;
268 break;
269 case HME_SEBI_IMASK_LINUXBUG:
270 addr = HME_SEBI_IMASK;
271 break;
272 default:
273 break;
276 val = s->sebregs[addr >> 2];
278 switch (addr) {
279 case HME_SEBI_STAT:
280 /* Autoclear status (except MIF) */
281 s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
282 sunhme_update_irq(s);
283 break;
286 trace_sunhme_seb_read(addr, val);
288 return val;
291 static const MemoryRegionOps sunhme_seb_ops = {
292 .read = sunhme_seb_read,
293 .write = sunhme_seb_write,
294 .endianness = DEVICE_LITTLE_ENDIAN,
295 .valid = {
296 .min_access_size = 4,
297 .max_access_size = 4,
301 static void sunhme_transmit(SunHMEState *s);
303 static void sunhme_etx_write(void *opaque, hwaddr addr,
304 uint64_t val, unsigned size)
306 SunHMEState *s = SUNHME(opaque);
308 trace_sunhme_etx_write(addr, val);
310 switch (addr) {
311 case HME_ETXI_PENDING:
312 if (val) {
313 sunhme_transmit(s);
315 break;
318 s->etxregs[addr >> 2] = val;
321 static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
322 unsigned size)
324 SunHMEState *s = SUNHME(opaque);
325 uint64_t val;
327 val = s->etxregs[addr >> 2];
329 trace_sunhme_etx_read(addr, val);
331 return val;
334 static const MemoryRegionOps sunhme_etx_ops = {
335 .read = sunhme_etx_read,
336 .write = sunhme_etx_write,
337 .endianness = DEVICE_LITTLE_ENDIAN,
338 .valid = {
339 .min_access_size = 4,
340 .max_access_size = 4,
344 static void sunhme_erx_write(void *opaque, hwaddr addr,
345 uint64_t val, unsigned size)
347 SunHMEState *s = SUNHME(opaque);
349 trace_sunhme_erx_write(addr, val);
351 s->erxregs[addr >> 2] = val;
354 static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
355 unsigned size)
357 SunHMEState *s = SUNHME(opaque);
358 uint64_t val;
360 val = s->erxregs[addr >> 2];
362 trace_sunhme_erx_read(addr, val);
364 return val;
367 static const MemoryRegionOps sunhme_erx_ops = {
368 .read = sunhme_erx_read,
369 .write = sunhme_erx_write,
370 .endianness = DEVICE_LITTLE_ENDIAN,
371 .valid = {
372 .min_access_size = 4,
373 .max_access_size = 4,
377 static void sunhme_mac_write(void *opaque, hwaddr addr,
378 uint64_t val, unsigned size)
380 SunHMEState *s = SUNHME(opaque);
381 uint64_t oldval = s->macregs[addr >> 2];
383 trace_sunhme_mac_write(addr, val);
385 s->macregs[addr >> 2] = val;
387 switch (addr) {
388 case HME_MACI_RXCFG:
389 if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
390 (val & HME_MAC_RXCFG_ENABLE)) {
391 qemu_flush_queued_packets(qemu_get_queue(s->nic));
393 break;
397 static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
398 unsigned size)
400 SunHMEState *s = SUNHME(opaque);
401 uint64_t val;
403 val = s->macregs[addr >> 2];
405 trace_sunhme_mac_read(addr, val);
407 return val;
410 static const MemoryRegionOps sunhme_mac_ops = {
411 .read = sunhme_mac_read,
412 .write = sunhme_mac_write,
413 .endianness = DEVICE_LITTLE_ENDIAN,
414 .valid = {
415 .min_access_size = 4,
416 .max_access_size = 4,
420 static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
422 trace_sunhme_mii_write(reg, data);
424 switch (reg) {
425 case MII_BMCR:
426 if (data & MII_BMCR_RESET) {
427 /* Autoclear reset bit, enable auto negotiation */
428 data &= ~MII_BMCR_RESET;
429 data |= MII_BMCR_AUTOEN;
431 if (data & MII_BMCR_ANRESTART) {
432 /* Autoclear auto negotiation restart */
433 data &= ~MII_BMCR_ANRESTART;
435 /* Indicate negotiation complete */
436 s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;
438 if (!qemu_get_queue(s->nic)->link_down) {
439 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
440 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
443 break;
446 s->miiregs[reg] = data;
449 static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
451 uint16_t data = s->miiregs[reg];
453 trace_sunhme_mii_read(reg, data);
455 return data;
458 static void sunhme_mif_write(void *opaque, hwaddr addr,
459 uint64_t val, unsigned size)
461 SunHMEState *s = SUNHME(opaque);
462 uint8_t cmd, reg;
463 uint16_t data;
465 trace_sunhme_mif_write(addr, val);
467 switch (addr) {
468 case HME_MIFI_CFG:
469 /* Mask the read-only bits */
470 val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
471 val |= s->mifregs[HME_MIFI_CFG >> 2] &
472 (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
473 break;
474 case HME_MIFI_FO:
475 /* Detect start of MII command */
476 if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
477 != MII_COMMAND_START) {
478 val |= HME_MIF_FO_TALSB;
479 break;
482 /* Internal phy only */
483 if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
484 != HME_PHYAD_INTERNAL) {
485 val |= HME_MIF_FO_TALSB;
486 break;
489 cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
490 reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
491 data = (val & HME_MIF_FO_DATA);
493 switch (cmd) {
494 case MII_COMMAND_WRITE:
495 sunhme_mii_write(s, reg, data);
496 break;
498 case MII_COMMAND_READ:
499 val &= ~HME_MIF_FO_DATA;
500 val |= sunhme_mii_read(s, reg);
501 break;
504 val |= HME_MIF_FO_TALSB;
505 break;
508 s->mifregs[addr >> 2] = val;
511 static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
512 unsigned size)
514 SunHMEState *s = SUNHME(opaque);
515 uint64_t val;
517 val = s->mifregs[addr >> 2];
519 switch (addr) {
520 case HME_MIFI_STAT:
521 /* Autoclear MIF interrupt status */
522 s->mifregs[HME_MIFI_STAT >> 2] = 0;
523 sunhme_update_irq(s);
524 break;
527 trace_sunhme_mif_read(addr, val);
529 return val;
532 static const MemoryRegionOps sunhme_mif_ops = {
533 .read = sunhme_mif_read,
534 .write = sunhme_mif_write,
535 .endianness = DEVICE_LITTLE_ENDIAN,
536 .valid = {
537 .min_access_size = 4,
538 .max_access_size = 4,
542 static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
544 qemu_send_packet(qemu_get_queue(s->nic), buf, size);
547 static inline int sunhme_get_tx_ring_count(SunHMEState *s)
549 return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
552 static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
554 return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
557 static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
559 uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
560 ring |= i & HME_ETXI_RING_OFFSET;
562 s->etxregs[HME_ETXI_RING >> 2] = ring;
565 static void sunhme_transmit(SunHMEState *s)
567 PCIDevice *d = PCI_DEVICE(s);
568 dma_addr_t tb, addr;
569 uint32_t intstatus, status, buffer, sum = 0;
570 int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
571 uint16_t csum = 0;
572 uint8_t xmit_buffer[HME_FIFO_SIZE];
574 tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
575 nr = sunhme_get_tx_ring_count(s);
576 cr = sunhme_get_tx_ring_nr(s);
578 pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
579 pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);
581 xmit_pos = 0;
582 while (status & HME_XD_OWN) {
583 trace_sunhme_tx_desc(buffer, status, cr, nr);
585 /* Copy data into transmit buffer */
586 addr = buffer;
587 len = status & HME_XD_TXLENMSK;
589 if (xmit_pos + len > HME_FIFO_SIZE) {
590 len = HME_FIFO_SIZE - xmit_pos;
593 pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
594 xmit_pos += len;
596 /* Detect start of packet for TX checksum */
597 if (status & HME_XD_SOP) {
598 sum = 0;
599 csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
600 csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
601 HME_XD_TXCSSTUFFSHIFT;
604 if (status & HME_XD_TXCKSUM) {
605 /* Only start calculation from csum_offset */
606 if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
607 sum += net_checksum_add(xmit_pos - csum_offset,
608 xmit_buffer + csum_offset);
609 trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
610 } else {
611 sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
612 trace_sunhme_tx_xsum_add(xmit_pos - len, len);
616 /* Detect end of packet for TX checksum */
617 if (status & HME_XD_EOP) {
618 /* Stuff the checksum if required */
619 if (status & HME_XD_TXCKSUM) {
620 csum = net_checksum_finish(sum);
621 stw_be_p(xmit_buffer + csum_stuff_offset, csum);
622 trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
625 if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
626 sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
627 trace_sunhme_tx_done(xmit_pos);
631 /* Update status */
632 status &= ~HME_XD_OWN;
633 pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);
635 /* Move onto next descriptor */
636 cr++;
637 if (cr >= nr) {
638 cr = 0;
640 sunhme_set_tx_ring_nr(s, cr);
642 pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
643 pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);
645 /* Indicate TX complete */
646 intstatus = s->sebregs[HME_SEBI_STAT >> 2];
647 intstatus |= HME_SEB_STAT_HOSTTOTX;
648 s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
650 /* Autoclear TX pending */
651 s->etxregs[HME_ETXI_PENDING >> 2] = 0;
653 sunhme_update_irq(s);
656 /* TX FIFO now clear */
657 intstatus = s->sebregs[HME_SEBI_STAT >> 2];
658 intstatus |= HME_SEB_STAT_TXALL;
659 s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
660 sunhme_update_irq(s);
663 static bool sunhme_can_receive(NetClientState *nc)
665 SunHMEState *s = qemu_get_nic_opaque(nc);
667 return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
670 static void sunhme_link_status_changed(NetClientState *nc)
672 SunHMEState *s = qemu_get_nic_opaque(nc);
674 if (nc->link_down) {
675 s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
676 s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
677 } else {
678 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
679 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
682 /* Exact bits unknown */
683 s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
684 sunhme_update_irq(s);
687 static inline int sunhme_get_rx_ring_count(SunHMEState *s)
689 uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
690 >> HME_ERX_CFG_RINGSIZE_SHIFT;
692 switch (rings) {
693 case 0:
694 return 32;
695 case 1:
696 return 64;
697 case 2:
698 return 128;
699 case 3:
700 return 256;
703 return 0;
706 static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
708 return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
711 static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
713 uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
714 ring |= i & HME_ERXI_RING_OFFSET;
716 s->erxregs[HME_ERXI_RING >> 2] = ring;
719 #define MIN_BUF_SIZE 60
721 static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
722 size_t size)
724 SunHMEState *s = qemu_get_nic_opaque(nc);
725 PCIDevice *d = PCI_DEVICE(s);
726 dma_addr_t rb, addr;
727 uint32_t intstatus, status, buffer, buffersize, sum;
728 uint16_t csum;
729 uint8_t buf1[60];
730 int nr, cr, len, rxoffset, csum_offset;
732 trace_sunhme_rx_incoming(size);
734 /* Do nothing if MAC RX disabled */
735 if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
736 return 0;
739 trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
740 buf[3], buf[4], buf[5]);
742 /* Check destination MAC address */
743 if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
744 /* Try and match local MAC address */
745 if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
746 (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
747 ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
748 (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
749 ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
750 (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
751 /* Matched local MAC address */
752 trace_sunhme_rx_filter_local_match();
753 } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
754 buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
755 /* Matched broadcast address */
756 trace_sunhme_rx_filter_bcast_match();
757 } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
758 /* Didn't match local address, check hash filter */
759 int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
760 if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
761 (1 << (mcast_idx & 0xf)))) {
762 /* Didn't match hash filter */
763 trace_sunhme_rx_filter_hash_nomatch();
764 trace_sunhme_rx_filter_reject();
765 return -1;
766 } else {
767 trace_sunhme_rx_filter_hash_match();
769 } else {
770 /* Not for us */
771 trace_sunhme_rx_filter_reject();
772 return -1;
774 } else {
775 trace_sunhme_rx_filter_promisc_match();
778 trace_sunhme_rx_filter_accept();
780 /* If too small buffer, then expand it */
781 if (size < MIN_BUF_SIZE) {
782 memcpy(buf1, buf, size);
783 memset(buf1 + size, 0, MIN_BUF_SIZE - size);
784 buf = buf1;
785 size = MIN_BUF_SIZE;
788 rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
789 nr = sunhme_get_rx_ring_count(s);
790 cr = sunhme_get_rx_ring_nr(s);
792 pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
793 pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);
795 /* If we don't own the current descriptor then indicate overflow error */
796 if (!(status & HME_XD_OWN)) {
797 s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
798 sunhme_update_irq(s);
799 trace_sunhme_rx_norxd();
800 return -1;
803 rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
804 HME_ERX_CFG_BYTEOFFSET_SHIFT;
806 addr = buffer + rxoffset;
807 buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;
809 /* Detect receive overflow */
810 len = size;
811 if (size > buffersize) {
812 status |= HME_XD_OFL;
813 len = buffersize;
816 pci_dma_write(d, addr, buf, len);
818 trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);
820 /* Calculate the receive checksum */
821 csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
822 HME_ERX_CFG_CSUMSHIFT << 1;
823 sum = 0;
824 sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
825 csum = net_checksum_finish(sum);
827 trace_sunhme_rx_xsum_calc(csum);
829 /* Update status */
830 status &= ~HME_XD_OWN;
831 status &= ~HME_XD_RXLENMSK;
832 status |= len << HME_XD_RXLENSHIFT;
833 status &= ~HME_XD_RXCKSUM;
834 status |= csum;
836 pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);
838 cr++;
839 if (cr >= nr) {
840 cr = 0;
843 sunhme_set_rx_ring_nr(s, cr);
845 /* Indicate RX complete */
846 intstatus = s->sebregs[HME_SEBI_STAT >> 2];
847 intstatus |= HME_SEB_STAT_RXTOHOST;
848 s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
850 sunhme_update_irq(s);
852 return len;
855 static NetClientInfo net_sunhme_info = {
856 .type = NET_CLIENT_DRIVER_NIC,
857 .size = sizeof(NICState),
858 .can_receive = sunhme_can_receive,
859 .receive = sunhme_receive,
860 .link_status_changed = sunhme_link_status_changed,
863 static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
865 SunHMEState *s = SUNHME(pci_dev);
866 DeviceState *d = DEVICE(pci_dev);
867 uint8_t *pci_conf;
869 pci_conf = pci_dev->config;
870 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
872 memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
873 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);
875 memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
876 "sunhme.seb", HME_SEB_REG_SIZE);
877 memory_region_add_subregion(&s->hme, 0, &s->sebreg);
879 memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
880 "sunhme.etx", HME_ETX_REG_SIZE);
881 memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);
883 memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
884 "sunhme.erx", HME_ERX_REG_SIZE);
885 memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);
887 memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
888 "sunhme.mac", HME_MAC_REG_SIZE);
889 memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);
891 memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
892 "sunhme.mif", HME_MIF_REG_SIZE);
893 memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);
895 qemu_macaddr_default_if_unset(&s->conf.macaddr);
896 s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
897 object_get_typename(OBJECT(d)), d->id, s);
898 qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
901 static void sunhme_instance_init(Object *obj)
903 SunHMEState *s = SUNHME(obj);
905 device_add_bootindex_property(obj, &s->conf.bootindex,
906 "bootindex", "/ethernet-phy@0",
907 DEVICE(obj));
910 static void sunhme_reset(DeviceState *ds)
912 SunHMEState *s = SUNHME(ds);
914 /* Configure internal transceiver */
915 s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;
917 /* Advetise auto, 100Mbps FD */
918 s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
919 s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
920 MII_BMSR_AN_COMP;
922 if (!qemu_get_queue(s->nic)->link_down) {
923 s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
924 s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
927 /* Set manufacturer */
928 s->miiregs[MII_PHYID1] = DP83840_PHYID1;
929 s->miiregs[MII_PHYID2] = DP83840_PHYID2;
931 /* Configure default interrupt mask */
932 s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
933 s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
936 static const VMStateDescription vmstate_hme = {
937 .name = "sunhme",
938 .version_id = 0,
939 .minimum_version_id = 0,
940 .fields = (VMStateField[]) {
941 VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
942 VMSTATE_MACADDR(conf.macaddr, SunHMEState),
943 VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
944 VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
945 VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
946 VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
947 VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
948 VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
949 VMSTATE_END_OF_LIST()
953 static void sunhme_class_init(ObjectClass *klass, void *data)
955 DeviceClass *dc = DEVICE_CLASS(klass);
956 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
958 k->realize = sunhme_realize;
959 k->vendor_id = PCI_VENDOR_ID_SUN;
960 k->device_id = PCI_DEVICE_ID_SUN_HME;
961 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
962 dc->vmsd = &vmstate_hme;
963 dc->reset = sunhme_reset;
964 device_class_set_props(dc, sunhme_properties);
965 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
968 static const TypeInfo sunhme_info = {
969 .name = TYPE_SUNHME,
970 .parent = TYPE_PCI_DEVICE,
971 .class_init = sunhme_class_init,
972 .instance_size = sizeof(SunHMEState),
973 .instance_init = sunhme_instance_init,
974 .interfaces = (InterfaceInfo[]) {
975 { INTERFACE_CONVENTIONAL_PCI_DEVICE },
980 static void sunhme_register_types(void)
982 type_register_static(&sunhme_info);
985 type_init(sunhme_register_types)