hw/net/xilinx_axienet.c
1 /*
2 * QEMU model of Xilinx AXI-Ethernet.
4 * Copyright (c) 2011 Edgar E. Iglesias.
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/sysbus.h"
28 #include "qapi/error.h"
29 #include "qemu/log.h"
30 #include "qemu/module.h"
31 #include "net/net.h"
32 #include "net/checksum.h"
35 #include "hw/irq.h"
36 #include "hw/qdev-properties.h"
37 #include "hw/stream.h"
39 #define DPHY(x)
41 #define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet"
42 #define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream"
43 #define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream"
45 #define XILINX_AXI_ENET(obj) \
46 OBJECT_CHECK(XilinxAXIEnet, (obj), TYPE_XILINX_AXI_ENET)
48 #define XILINX_AXI_ENET_DATA_STREAM(obj) \
49 OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
50 TYPE_XILINX_AXI_ENET_DATA_STREAM)
52 #define XILINX_AXI_ENET_CONTROL_STREAM(obj) \
53 OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
54 TYPE_XILINX_AXI_ENET_CONTROL_STREAM)
56 /* Advertisement control register. */
57 #define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
58 #define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
59 #define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
60 #define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
62 #define CONTROL_PAYLOAD_WORDS 5
63 #define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))
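/*
 * The control payload is the block of five 32-bit "app" words that
 * accompanies each frame on the stream interfaces: eth_rx() fills
 * app[0..4] with receive status (match flags, raw checksum, length)
 * and queues it for the control stream, while
 * xilinx_axienet_control_stream_push() latches five words into
 * s->hdr to drive transmit checksum offload.
 */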
65 struct PHY {
66 uint32_t regs[32];
68 int link;
70 unsigned int (*read)(struct PHY *phy, unsigned int req);
71 void (*write)(struct PHY *phy, unsigned int req,
72 unsigned int data);
75 static unsigned int tdk_read(struct PHY *phy, unsigned int req)
77 int regnum;
78 unsigned r = 0;
80 regnum = req & 0x1f;
82 switch (regnum) {
83 case 1:
84 if (!phy->link) {
85 break;
87 /* MR1. */
88 /* Speeds and modes. */
89 r |= (1 << 13) | (1 << 14);
90 r |= (1 << 11) | (1 << 12);
91 r |= (1 << 5); /* Autoneg complete. */
92 r |= (1 << 3); /* Autoneg able. */
93 r |= (1 << 2); /* link. */
94 r |= (1 << 1); /* link. */
95 break;
96 case 5:
97 /* Link partner ability.
98 We are kind; always agree with whatever best mode
99 the guest advertises. */
100 r = 1 << 14; /* Success. */
101 /* Copy advertised modes. */
102 r |= phy->regs[4] & (15 << 5);
103 /* Autoneg support. */
104 r |= 1;
105 break;
106 case 17:
107 /* Marvell PHY on many xilinx boards. */
108 r = 0x8000; /* 1000Mb */
109 break;
110 case 18:
112 /* Diagnostics reg. */
113 int duplex = 0;
114 int speed_100 = 0;
116 if (!phy->link) {
117 break;
120 /* Are we advertising 100 half or 100 full ? */
121 speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
122 speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);
124 /* Are we advertising 10 full or 100 full ? */
125 duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
126 duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
127 r = (speed_100 << 10) | (duplex << 11);
129 break;
131 default:
132 r = phy->regs[regnum];
133 break;
135 DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
136 return r;
139 static void
140 tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
142 int regnum;
144 regnum = req & 0x1f;
145 DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
146 switch (regnum) {
147 default:
148 phy->regs[regnum] = data;
149 break;
152 /* Unconditionally clear the BMCR reset and autoneg-restart bits. */
153 phy->regs[0] &= ~0x8200;
156 static void
157 tdk_init(struct PHY *phy)
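/*
 * Reset values loosely model a TDK/Marvell-style PHY: BMCR 0x3100
 * selects 100 Mb/s, full duplex, autoneg enabled; regs[2]/regs[3]
 * form the PHY identifier; regs[4] (0x01E1) advertises 10/100 in
 * both half and full duplex plus the IEEE 802.3 selector.
 */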
159 phy->regs[0] = 0x3100;
160 /* PHY Id. */
161 phy->regs[2] = 0x0300;
162 phy->regs[3] = 0xe400;
163 /* Autonegotiation advertisement reg. */
164 phy->regs[4] = 0x01E1;
165 phy->link = 1;
167 phy->read = tdk_read;
168 phy->write = tdk_write;
171 struct MDIOBus {
172 /* bus. */
173 int mdc;
174 int mdio;
176 /* decoder. */
177 enum {
178 PREAMBLE,
179 SOF,
180 OPC,
181 ADDR,
182 REQ,
183 TURNAROUND,
184 DATA
185 } state;
186 unsigned int drive;
188 unsigned int cnt;
189 unsigned int addr;
190 unsigned int opc;
191 unsigned int req;
192 unsigned int data;
194 struct PHY *devs[32];
197 static void
198 mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
200 bus->devs[addr & 0x1f] = phy;
203 #ifdef USE_THIS_DEAD_CODE
204 static void
205 mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
207 bus->devs[addr & 0x1f] = NULL;
209 #endif
211 static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
212 unsigned int reg)
214 struct PHY *phy;
215 uint16_t data;
217 phy = bus->devs[addr];
218 if (phy && phy->read) {
219 data = phy->read(phy, reg);
220 } else {
221 data = 0xffff;
223 DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
224 return data;
227 static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
228 unsigned int reg, uint16_t data)
230 struct PHY *phy;
232 DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
233 phy = bus->devs[addr];
234 if (phy && phy->write) {
235 phy->write(phy, reg, data);
239 #define DENET(x)
241 #define R_RAF (0x000 / 4)
242 enum {
243 RAF_MCAST_REJ = (1 << 1),
244 RAF_BCAST_REJ = (1 << 2),
245 RAF_EMCF_EN = (1 << 12),
246 RAF_NEWFUNC_EN = (1 << 11)
249 #define R_IS (0x00C / 4)
250 enum {
251 IS_HARD_ACCESS_COMPLETE = 1,
252 IS_AUTONEG = (1 << 1),
253 IS_RX_COMPLETE = (1 << 2),
254 IS_RX_REJECT = (1 << 3),
255 IS_TX_COMPLETE = (1 << 5),
256 IS_RX_DCM_LOCK = (1 << 6),
257 IS_MGM_RDY = (1 << 7),
258 IS_PHY_RST_DONE = (1 << 8),
261 #define R_IP (0x010 / 4)
262 #define R_IE (0x014 / 4)
263 #define R_UAWL (0x020 / 4)
264 #define R_UAWU (0x024 / 4)
265 #define R_PPST (0x030 / 4)
266 enum {
267 PPST_LINKSTATUS = (1 << 0),
268 PPST_PHY_LINKSTATUS = (1 << 7),
271 #define R_STATS_RX_BYTESL (0x200 / 4)
272 #define R_STATS_RX_BYTESH (0x204 / 4)
273 #define R_STATS_TX_BYTESL (0x208 / 4)
274 #define R_STATS_TX_BYTESH (0x20C / 4)
275 #define R_STATS_RXL (0x290 / 4)
276 #define R_STATS_RXH (0x294 / 4)
277 #define R_STATS_RX_BCASTL (0x2a0 / 4)
278 #define R_STATS_RX_BCASTH (0x2a4 / 4)
279 #define R_STATS_RX_MCASTL (0x2a8 / 4)
280 #define R_STATS_RX_MCASTH (0x2ac / 4)
282 #define R_RCW0 (0x400 / 4)
283 #define R_RCW1 (0x404 / 4)
284 enum {
285 RCW1_VLAN = (1 << 27),
286 RCW1_RX = (1 << 28),
287 RCW1_FCS = (1 << 29),
288 RCW1_JUM = (1 << 30),
289 RCW1_RST = (1 << 31),
292 #define R_TC (0x408 / 4)
293 enum {
294 TC_VLAN = (1 << 27),
295 TC_TX = (1 << 28),
296 TC_FCS = (1 << 29),
297 TC_JUM = (1 << 30),
298 TC_RST = (1 << 31),
301 #define R_EMMC (0x410 / 4)
302 enum {
303 EMMC_LINKSPEED_10MB = (0 << 30),
304 EMMC_LINKSPEED_100MB = (1 << 30),
305 EMMC_LINKSPEED_1000MB = (2 << 30),
308 #define R_PHYC (0x414 / 4)
310 #define R_MC (0x500 / 4)
311 #define MC_EN (1 << 6)
313 #define R_MCR (0x504 / 4)
314 #define R_MWD (0x508 / 4)
315 #define R_MRD (0x50c / 4)
316 #define R_MIS (0x600 / 4)
317 #define R_MIP (0x620 / 4)
318 #define R_MIE (0x640 / 4)
319 #define R_MIC (0x640 / 4)
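/*
 * MDIO programming model, as implemented in enet_write()/enet_read()
 * below: R_MC bit 6 (MC_EN) enables the interface and its low six bits
 * hold the clock divisor; an access is started by writing R_MCR with
 * the PHY address in bits [28:24], the register address in bits
 * [20:16], the opcode in bits [15:14] (1 = write from R_MWD, 2 = read
 * into R_MRD) and bit 11 set to initiate.  Bit 7 of R_MCR reads back
 * as 1 since the model completes accesses instantly.  For example,
 * reading register 1 of the PHY at address 7 would use
 * MCR = (7 << 24) | (1 << 16) | (2 << 14) | (1 << 11).
 */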
321 #define R_UAW0 (0x700 / 4)
322 #define R_UAW1 (0x704 / 4)
323 #define R_FMI (0x708 / 4)
324 #define R_AF0 (0x710 / 4)
325 #define R_AF1 (0x714 / 4)
326 #define R_MAX (0x34 / 4)
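/*
 * R_MAX only covers the low control/interrupt registers (R_RAF up to
 * R_PPST); everything above that range is backed by dedicated fields
 * in XilinxAXIEnet below rather than by the regs[] array.
 */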
328 /* Indirect registers. */
329 struct TEMAC {
330 struct MDIOBus mdio_bus;
331 struct PHY phy;
333 void *parent;
336 typedef struct XilinxAXIEnetStreamSlave XilinxAXIEnetStreamSlave;
337 typedef struct XilinxAXIEnet XilinxAXIEnet;
339 struct XilinxAXIEnetStreamSlave {
340 Object parent;
342 struct XilinxAXIEnet *enet;
345 struct XilinxAXIEnet {
346 SysBusDevice busdev;
347 MemoryRegion iomem;
348 qemu_irq irq;
349 StreamSlave *tx_data_dev;
350 StreamSlave *tx_control_dev;
351 XilinxAXIEnetStreamSlave rx_data_dev;
352 XilinxAXIEnetStreamSlave rx_control_dev;
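/*
 * The naming is from the stream's point of view, not Ethernet's:
 * tx_data_dev/tx_control_dev point at the DMA's slaves that received
 * frames and their status words are pushed into, while
 * rx_data_dev/rx_control_dev are this device's own slaves that the
 * DMA pushes frames to transmit (and checksum-offload control words)
 * into.
 */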
353 NICState *nic;
354 NICConf conf;
357 uint32_t c_rxmem;
358 uint32_t c_txmem;
359 uint32_t c_phyaddr;
361 struct TEMAC TEMAC;
363 /* MII regs. */
364 union {
365 uint32_t regs[4];
366 struct {
367 uint32_t mc;
368 uint32_t mcr;
369 uint32_t mwd;
370 uint32_t mrd;
372 } mii;
374 struct {
375 uint64_t rx_bytes;
376 uint64_t tx_bytes;
378 uint64_t rx;
379 uint64_t rx_bcast;
380 uint64_t rx_mcast;
381 } stats;
383 /* Receive configuration words. */
384 uint32_t rcw[2];
385 /* Transmit config. */
386 uint32_t tc;
387 uint32_t emmc;
388 uint32_t phyc;
390 /* Unicast Address Word. */
391 uint32_t uaw[2];
392 /* Unicast address filter used with extended mcast. */
393 uint32_t ext_uaw[2];
394 uint32_t fmi;
396 uint32_t regs[R_MAX];
398 /* Multicast filter addrs. */
399 uint32_t maddr[4][2];
400 /* 32K x 1 lookup filter. */
401 uint32_t ext_mtable[1024];
403 uint32_t hdr[CONTROL_PAYLOAD_WORDS];
405 uint8_t *txmem;
406 uint32_t txpos;
408 uint8_t *rxmem;
409 uint32_t rxsize;
410 uint32_t rxpos;
412 uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
413 uint32_t rxappsize;
415 /* Whether axienet_eth_rx_notify should flush the incoming queue. */
416 bool need_flush;
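/*
 * Flow control between the NIC backend and the attached DMA: while a
 * received frame is still being pushed out (rxsize != 0), or while RX
 * is disabled or in reset, eth_can_rx() returns false and eth_rx()
 * returns 0 after setting need_flush; once axienet_eth_rx_notify()
 * has drained the frame it flushes the queued packets again.
 */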
419 static void axienet_rx_reset(XilinxAXIEnet *s)
421 s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
424 static void axienet_tx_reset(XilinxAXIEnet *s)
426 s->tc = TC_JUM | TC_TX | TC_VLAN;
427 s->txpos = 0;
430 static inline int axienet_rx_resetting(XilinxAXIEnet *s)
432 return s->rcw[1] & RCW1_RST;
435 static inline int axienet_rx_enabled(XilinxAXIEnet *s)
437 return s->rcw[1] & RCW1_RX;
440 static inline int axienet_extmcf_enabled(XilinxAXIEnet *s)
442 return !!(s->regs[R_RAF] & RAF_EMCF_EN);
445 static inline int axienet_newfunc_enabled(XilinxAXIEnet *s)
447 return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
450 static void xilinx_axienet_reset(DeviceState *d)
452 XilinxAXIEnet *s = XILINX_AXI_ENET(d);
454 axienet_rx_reset(s);
455 axienet_tx_reset(s);
457 s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
458 s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;
460 s->emmc = EMMC_LINKSPEED_100MB;
463 static void enet_update_irq(XilinxAXIEnet *s)
465 s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
466 qemu_set_irq(s->irq, !!s->regs[R_IP]);
469 static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size)
471 XilinxAXIEnet *s = opaque;
472 uint32_t r = 0;
473 addr >>= 2;
475 switch (addr) {
476 case R_RCW0:
477 case R_RCW1:
478 r = s->rcw[addr & 1];
479 break;
481 case R_TC:
482 r = s->tc;
483 break;
485 case R_EMMC:
486 r = s->emmc;
487 break;
489 case R_PHYC:
490 r = s->phyc;
491 break;
493 case R_MCR:
494 r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
495 break;
497 case R_STATS_RX_BYTESL:
498 case R_STATS_RX_BYTESH:
499 r = s->stats.rx_bytes >> (32 * (addr & 1));
500 break;
502 case R_STATS_TX_BYTESL:
503 case R_STATS_TX_BYTESH:
504 r = s->stats.tx_bytes >> (32 * (addr & 1));
505 break;
507 case R_STATS_RXL:
508 case R_STATS_RXH:
509 r = s->stats.rx >> (32 * (addr & 1));
510 break;
511 case R_STATS_RX_BCASTL:
512 case R_STATS_RX_BCASTH:
513 r = s->stats.rx_bcast >> (32 * (addr & 1));
514 break;
515 case R_STATS_RX_MCASTL:
516 case R_STATS_RX_MCASTH:
517 r = s->stats.rx_mcast >> (32 * (addr & 1));
518 break;
520 case R_MC:
521 case R_MWD:
522 case R_MRD:
523 r = s->mii.regs[addr & 3];
524 break;
526 case R_UAW0:
527 case R_UAW1:
528 r = s->uaw[addr & 1];
529 break;
531 case R_UAWU:
532 case R_UAWL:
533 r = s->ext_uaw[addr & 1];
534 break;
536 case R_FMI:
537 r = s->fmi;
538 break;
540 case R_AF0:
541 case R_AF1:
542 r = s->maddr[s->fmi & 3][addr & 1];
543 break;
545 case 0x8000 ... 0x83ff:
546 r = s->ext_mtable[addr - 0x8000];
547 break;
549 default:
550 if (addr < ARRAY_SIZE(s->regs)) {
551 r = s->regs[addr];
553 DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
554 __func__, addr * 4, r));
555 break;
557 return r;
560 static void enet_write(void *opaque, hwaddr addr,
561 uint64_t value, unsigned size)
563 XilinxAXIEnet *s = opaque;
564 struct TEMAC *t = &s->TEMAC;
566 addr >>= 2;
567 switch (addr) {
568 case R_RCW0:
569 case R_RCW1:
570 s->rcw[addr & 1] = value;
571 if ((addr & 1) && value & RCW1_RST) {
572 axienet_rx_reset(s);
573 } else {
574 qemu_flush_queued_packets(qemu_get_queue(s->nic));
576 break;
578 case R_TC:
579 s->tc = value;
580 if (value & TC_RST) {
581 axienet_tx_reset(s);
583 break;
585 case R_EMMC:
586 s->emmc = value;
587 break;
589 case R_PHYC:
590 s->phyc = value;
591 break;
593 case R_MC:
594 value &= ((1 << 7) - 1);
596 /* Enable the MII. */
597 if (value & MC_EN) {
598 unsigned int miiclkdiv = value & ((1 << 6) - 1);
599 if (!miiclkdiv) {
600 qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
603 s->mii.mc = value;
604 break;
606 case R_MCR: {
607 unsigned int phyaddr = (value >> 24) & 0x1f;
608 unsigned int regaddr = (value >> 16) & 0x1f;
609 unsigned int op = (value >> 14) & 3;
610 unsigned int initiate = (value >> 11) & 1;
612 if (initiate) {
613 if (op == 1) {
614 mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
615 } else if (op == 2) {
616 s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
617 } else {
618 qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
621 s->mii.mcr = value;
622 break;
625 case R_MWD:
626 case R_MRD:
627 s->mii.regs[addr & 3] = value;
628 break;
631 case R_UAW0:
632 case R_UAW1:
633 s->uaw[addr & 1] = value;
634 break;
636 case R_UAWL:
637 case R_UAWU:
638 s->ext_uaw[addr & 1] = value;
639 break;
641 case R_FMI:
642 s->fmi = value;
643 break;
645 case R_AF0:
646 case R_AF1:
647 s->maddr[s->fmi & 3][addr & 1] = value;
648 break;
650 case R_IS:
651 s->regs[addr] &= ~value;
652 break;
654 case 0x8000 ... 0x83ff:
655 s->ext_mtable[addr - 0x8000] = value;
656 break;
658 default:
659 DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
660 __func__, addr * 4, (unsigned)value));
661 if (addr < ARRAY_SIZE(s->regs)) {
662 s->regs[addr] = value;
664 break;
666 enet_update_irq(s);
669 static const MemoryRegionOps enet_ops = {
670 .read = enet_read,
671 .write = enet_write,
672 .endianness = DEVICE_LITTLE_ENDIAN,
675 static int eth_can_rx(XilinxAXIEnet *s)
677 /* RX enabled? */
678 return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
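/*
 * Address-filter helper used below: f0 holds the first four bytes of
 * the station address (as stored by the guest in the UAW/AF
 * registers) and the low 16 bits of f1 hold the last two; returns
 * nonzero on a match.
 */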
681 static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
683 int match = 1;
685 if (memcmp(buf, &f0, 4)) {
686 match = 0;
689 if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
690 match = 0;
693 return match;
696 static void axienet_eth_rx_notify(void *opaque)
698 XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);
700 while (s->rxappsize && stream_can_push(s->tx_control_dev,
701 axienet_eth_rx_notify, s)) {
702 size_t ret = stream_push(s->tx_control_dev,
703 (void *)s->rxapp + CONTROL_PAYLOAD_SIZE
704 - s->rxappsize, s->rxappsize, true);
705 s->rxappsize -= ret;
708 while (s->rxsize && stream_can_push(s->tx_data_dev,
709 axienet_eth_rx_notify, s)) {
710 size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
711 s->rxsize, true);
712 s->rxsize -= ret;
713 s->rxpos += ret;
714 if (!s->rxsize) {
715 s->regs[R_IS] |= IS_RX_COMPLETE;
716 if (s->need_flush) {
717 s->need_flush = false;
718 qemu_flush_queued_packets(qemu_get_queue(s->nic));
722 enet_update_irq(s);
725 static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
727 XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
728 static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
729 0xff, 0xff, 0xff};
730 static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
731 uint32_t app[CONTROL_PAYLOAD_WORDS] = {0};
732 int promisc = s->fmi & (1 << 31);
733 int unicast, broadcast, multicast, ip_multicast = 0;
734 uint32_t csum32;
735 uint16_t csum16;
736 int i;
738 DENET(qemu_log("%s: %zd bytes\n", __func__, size));
740 if (!eth_can_rx(s)) {
741 s->need_flush = true;
742 return 0;
745 unicast = ~buf[0] & 0x1;
746 broadcast = memcmp(buf, sa_bcast, 6) == 0;
747 multicast = !unicast && !broadcast;
748 if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
749 ip_multicast = 1;
752 /* Jumbo or vlan sizes ? */
753 if (!(s->rcw[1] & RCW1_JUM)) {
754 if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
755 return size;
759 /* Basic Address filters. If you want to use the extended filters
760 you'll generally have to place the ethernet mac into promiscuous mode
761 to keep the basic filtering from dropping most frames. */
762 if (!promisc) {
763 if (unicast) {
764 if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
765 return size;
767 } else {
768 if (broadcast) {
769 /* Broadcast. */
770 if (s->regs[R_RAF] & RAF_BCAST_REJ) {
771 return size;
773 } else {
774 int drop = 1;
776 /* Multicast. */
777 if (s->regs[R_RAF] & RAF_MCAST_REJ) {
778 return size;
781 for (i = 0; i < 4; i++) {
782 if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
783 drop = 0;
784 break;
788 if (drop) {
789 return size;
795 /* Extended mcast filtering enabled? */
796 if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
797 if (unicast) {
798 if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
799 return size;
801 } else {
802 if (broadcast) {
803 /* Broadcast. ??? */
804 if (s->regs[R_RAF] & RAF_BCAST_REJ) {
805 return size;
807 } else {
808 int idx, bit;
810 /* Multicast. */
811 if (!memcmp(buf, sa_ipmcast, 3)) {
812 return size;
815 idx = (buf[4] & 0x7f) << 8;
816 idx |= buf[5];
818 bit = 1 << (idx & 0x1f);
819 idx >>= 5;
821 if (!(s->ext_mtable[idx] & bit)) {
822 return size;
828 if (size < 12) {
829 s->regs[R_IS] |= IS_RX_REJECT;
830 enet_update_irq(s);
831 return -1;
834 if (size > (s->c_rxmem - 4)) {
835 size = s->c_rxmem - 4;
838 memcpy(s->rxmem, buf, size);
839 memset(s->rxmem + size, 0, 4); /* Clear the FCS. */
841 if (s->rcw[1] & RCW1_FCS) {
842 size += 4; /* fcs is inband. */
845 app[0] = 5 << 28;
846 csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
847 /* Fold it once. */
848 csum32 = (csum32 & 0xffff) + (csum32 >> 16);
849 /* And twice to get rid of possible carries. */
850 csum16 = (csum32 & 0xffff) + (csum32 >> 16);
851 app[3] = csum16;
852 app[4] = size & 0xffff;
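/*
 * These are the receive "app" words handed to the DMA on the control
 * stream: app[2] carries the match/status flags set just below
 * (bit 0 multicast, bit 1 IP multicast, bit 3 broadcast, bit 6 good
 * frame), app[3] the raw 16-bit checksum over everything after the
 * 14-byte Ethernet header, and app[4] the frame length.
 */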
854 s->stats.rx_bytes += size;
855 s->stats.rx++;
856 if (multicast) {
857 s->stats.rx_mcast++;
858 app[2] |= 1 | (ip_multicast << 1);
859 } else if (broadcast) {
860 s->stats.rx_bcast++;
861 app[2] |= 1 << 3;
864 /* Good frame. */
865 app[2] |= 1 << 6;
867 s->rxsize = size;
868 s->rxpos = 0;
869 for (i = 0; i < ARRAY_SIZE(app); ++i) {
870 app[i] = cpu_to_le32(app[i]);
872 s->rxappsize = CONTROL_PAYLOAD_SIZE;
873 memcpy(s->rxapp, app, s->rxappsize);
874 axienet_eth_rx_notify(s);
876 enet_update_irq(s);
877 return size;
880 static size_t
881 xilinx_axienet_control_stream_push(StreamSlave *obj, uint8_t *buf, size_t len,
882 bool eop)
884 int i;
885 XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj);
886 XilinxAXIEnet *s = cs->enet;
888 assert(eop);
889 if (len != CONTROL_PAYLOAD_SIZE) {
890 hw_error("AXI Enet requires %d byte control stream payload\n",
891 (int)CONTROL_PAYLOAD_SIZE);
894 memcpy(s->hdr, buf, len);
896 for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) {
897 s->hdr[i] = le32_to_cpu(s->hdr[i]);
899 return len;
902 static size_t
903 xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size,
904 bool eop)
906 XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(obj);
907 XilinxAXIEnet *s = ds->enet;
909 /* TX enable ? */
910 if (!(s->tc & TC_TX)) {
911 return size;
914 if (s->txpos + size > s->c_txmem) {
915 qemu_log_mask(LOG_GUEST_ERROR, "%s: Packet larger than txmem\n",
916 TYPE_XILINX_AXI_ENET);
917 s->txpos = 0;
918 return size;
921 if (s->txpos == 0 && eop) {
922 /* Fast path single fragment. */
923 s->txpos = size;
924 } else {
925 memcpy(s->txmem + s->txpos, buf, size);
926 buf = s->txmem;
927 s->txpos += size;
929 if (!eop) {
930 return size;
934 /* Jumbo or vlan sizes ? */
935 if (!(s->tc & TC_JUM)) {
936 if (s->txpos > 1518 && s->txpos <= 1522 && !(s->tc & TC_VLAN)) {
937 s->txpos = 0;
938 return size;
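/*
 * Transmit checksum offload, driven by the control-stream words
 * latched into s->hdr: hdr[0] bit 0 enables it, the high and low
 * halves of hdr[1] give the offset to start summing from and the
 * offset to write the result to, and hdr[2] holds a 16-bit seed.
 */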
942 if (s->hdr[0] & 1) {
943 unsigned int start_off = s->hdr[1] >> 16;
944 unsigned int write_off = s->hdr[1] & 0xffff;
945 uint32_t tmp_csum;
946 uint16_t csum;
948 tmp_csum = net_checksum_add(s->txpos - start_off,
949 buf + start_off);
950 /* Accumulate the seed. */
951 tmp_csum += s->hdr[2] & 0xffff;
953 /* Fold the 32bit partial checksum. */
954 csum = net_checksum_finish(tmp_csum);
956 /* Writeback. */
957 buf[write_off] = csum >> 8;
958 buf[write_off + 1] = csum & 0xff;
961 qemu_send_packet(qemu_get_queue(s->nic), buf, s->txpos);
963 s->stats.tx_bytes += s->txpos;
964 s->regs[R_IS] |= IS_TX_COMPLETE;
965 enet_update_irq(s);
967 s->txpos = 0;
968 return size;
971 static NetClientInfo net_xilinx_enet_info = {
972 .type = NET_CLIENT_DRIVER_NIC,
973 .size = sizeof(NICState),
974 .receive = eth_rx,
977 static void xilinx_enet_realize(DeviceState *dev, Error **errp)
979 XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
980 XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev);
981 XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(
982 &s->rx_control_dev);
983 Error *local_err = NULL;
985 object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
986 (Object **) &ds->enet,
987 object_property_allow_set_link,
988 OBJ_PROP_LINK_STRONG);
989 object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
990 (Object **) &cs->enet,
991 object_property_allow_set_link,
992 OBJ_PROP_LINK_STRONG);
993 object_property_set_link(OBJECT(ds), OBJECT(s), "enet", &local_err);
994 object_property_set_link(OBJECT(cs), OBJECT(s), "enet", &local_err);
995 if (local_err) {
996 goto xilinx_enet_realize_fail;
999 qemu_macaddr_default_if_unset(&s->conf.macaddr);
1000 s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
1001 object_get_typename(OBJECT(dev)), dev->id, s);
1002 qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
1004 tdk_init(&s->TEMAC.phy);
1005 mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);
1007 s->TEMAC.parent = s;
1009 s->rxmem = g_malloc(s->c_rxmem);
1010 s->txmem = g_malloc(s->c_txmem);
1011 return;
1013 xilinx_enet_realize_fail:
1014 error_propagate(errp, local_err);
1017 static void xilinx_enet_init(Object *obj)
1019 XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
1020 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
1022 object_initialize_child(OBJECT(s), "axistream-connected-target",
1023 &s->rx_data_dev, TYPE_XILINX_AXI_ENET_DATA_STREAM);
1024 object_initialize_child(OBJECT(s), "axistream-control-connected-target",
1025 &s->rx_control_dev,
1026 TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
1027 sysbus_init_irq(sbd, &s->irq);
1029 memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
1030 sysbus_init_mmio(sbd, &s->iomem);
1033 static Property xilinx_enet_properties[] = {
1034 DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
1035 DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
1036 DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
1037 DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf),
1038 DEFINE_PROP_LINK("axistream-connected", XilinxAXIEnet,
1039 tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
1040 DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet,
1041 tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
1042 DEFINE_PROP_END_OF_LIST(),
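/*
 * The "axistream-connected" and "axistream-control-connected" links
 * are expected to point at the data and control stream slaves of an
 * AXI DMA; board code (for instance the petalogix-ml605 machine)
 * normally sets them up, together with "phyaddr", "rxmem" and
 * "txmem", when it instantiates the device.
 */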
1045 static void xilinx_enet_class_init(ObjectClass *klass, void *data)
1047 DeviceClass *dc = DEVICE_CLASS(klass);
1049 dc->realize = xilinx_enet_realize;
1050 device_class_set_props(dc, xilinx_enet_properties);
1051 dc->reset = xilinx_axienet_reset;
1054 static void xilinx_enet_control_stream_class_init(ObjectClass *klass,
1055 void *data)
1057 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
1059 ssc->push = xilinx_axienet_control_stream_push;
1062 static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data)
1064 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
1066 ssc->push = xilinx_axienet_data_stream_push;
1069 static const TypeInfo xilinx_enet_info = {
1070 .name = TYPE_XILINX_AXI_ENET,
1071 .parent = TYPE_SYS_BUS_DEVICE,
1072 .instance_size = sizeof(XilinxAXIEnet),
1073 .class_init = xilinx_enet_class_init,
1074 .instance_init = xilinx_enet_init,
1077 static const TypeInfo xilinx_enet_data_stream_info = {
1078 .name = TYPE_XILINX_AXI_ENET_DATA_STREAM,
1079 .parent = TYPE_OBJECT,
1080 .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
1081 .class_init = xilinx_enet_data_stream_class_init,
1082 .interfaces = (InterfaceInfo[]) {
1083 { TYPE_STREAM_SLAVE },
1088 static const TypeInfo xilinx_enet_control_stream_info = {
1089 .name = TYPE_XILINX_AXI_ENET_CONTROL_STREAM,
1090 .parent = TYPE_OBJECT,
1091 .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
1092 .class_init = xilinx_enet_control_stream_class_init,
1093 .interfaces = (InterfaceInfo[]) {
1094 { TYPE_STREAM_SLAVE },
1099 static void xilinx_enet_register_types(void)
1101 type_register_static(&xilinx_enet_info);
1102 type_register_static(&xilinx_enet_data_stream_info);
1103 type_register_static(&xilinx_enet_control_stream_info);
1106 type_init(xilinx_enet_register_types)