/*
 * QEMU model of Xilinx AXI-Ethernet.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw/sysbus.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/checksum.h"

#include "hw/stream.h"

#define DPHY(x)

#define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet"
#define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream"
#define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream"

#define XILINX_AXI_ENET(obj) \
     OBJECT_CHECK(XilinxAXIEnet, (obj), TYPE_XILINX_AXI_ENET)

#define XILINX_AXI_ENET_DATA_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
     TYPE_XILINX_AXI_ENET_DATA_STREAM)

#define XILINX_AXI_ENET_CONTROL_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
     TYPE_XILINX_AXI_ENET_CONTROL_STREAM)

/* Advertisement control register. */
#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))
struct PHY {
    uint32_t regs[32];

    int link;

    unsigned int (*read)(struct PHY *phy, unsigned int req);
    void (*write)(struct PHY *phy, unsigned int req,
                  unsigned int data);
};
static unsigned int tdk_read(struct PHY *phy, unsigned int req)
{
    int regnum;
    unsigned r = 0;

    regnum = req & 0x1f;

    switch (regnum) {
    case 1:
        if (!phy->link) {
            break;
        }
        /* MR1. */
        /* Speeds and modes. */
        r |= (1 << 13) | (1 << 14);
        r |= (1 << 11) | (1 << 12);
        r |= (1 << 5); /* Autoneg complete. */
        r |= (1 << 3); /* Autoneg able. */
        r |= (1 << 2); /* link. */
        r |= (1 << 1); /* link. */
        break;
    case 5:
        /* Link partner ability.
           We are kind; always agree with whatever best mode
           the guest advertises. */
        r = 1 << 14; /* Success. */
        /* Copy advertised modes. */
        r |= phy->regs[4] & (15 << 5);
        /* Autoneg support. */
        r |= 1;
        break;
    case 17:
        /* Marvell PHY on many xilinx boards. */
        r = 0x8000; /* 1000Mb */
        break;
    case 18:
    {
        /* Diagnostics reg. */
        int duplex = 0;
        int speed_100 = 0;

        if (!phy->link) {
            break;
        }

        /* Are we advertising 100 half or 100 duplex ? */
        speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
        speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);

        /* Are we advertising 10 duplex or 100 duplex ? */
        duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
        duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
        r = (speed_100 << 10) | (duplex << 11);
        break;
    }
    default:
        r = phy->regs[regnum];
        break;
    }
    DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
    return r;
}
static void
tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
{
    int regnum;

    regnum = req & 0x1f;
    DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
    switch (regnum) {
    default:
        phy->regs[regnum] = data;
        break;
    }

    /* Unconditionally clear regs[BMCR][BMCR_RESET] */
    phy->regs[0] &= ~0x8000;
}

static void
tdk_init(struct PHY *phy)
{
    phy->regs[0] = 0x3100;
    /* PHY Id. */
    phy->regs[2] = 0x0300;
    phy->regs[3] = 0xe400;
    /* Autonegotiation advertisement reg. */
    phy->regs[4] = 0x01E1;
    phy->link = 1;

    phy->read = tdk_read;
    phy->write = tdk_write;
}
struct MDIOBus {
    /* bus. */
    int mdc;
    int mdio;

    /* decoder. */
    enum {
        PREAMBLE,
        SOF,
        OPC,
        ADDR,
        REQ,
        TURNAROUND,
        DATA
    } state;
    unsigned int drive;

    unsigned int cnt;
    unsigned int addr;
    unsigned int opc;
    unsigned int req;
    unsigned int data;

    struct PHY *devs[32];
};
static void
mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = phy;
}

#ifdef USE_THIS_DEAD_CODE
static void
mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
{
    bus->devs[addr & 0x1f] = NULL;
}
#endif

static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
                              unsigned int reg)
{
    struct PHY *phy;
    uint16_t data;

    phy = bus->devs[addr];
    if (phy && phy->read) {
        data = phy->read(phy, reg);
    } else {
        data = 0xffff;
    }
    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    return data;
}

static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
                           unsigned int reg, uint16_t data)
{
    struct PHY *phy;

    DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
    phy = bus->devs[addr];
    if (phy && phy->write) {
        phy->write(phy, reg, data);
    }
}
#define DENET(x)

#define R_RAF      (0x000 / 4)
enum {
    RAF_MCAST_REJ = (1 << 1),
    RAF_BCAST_REJ = (1 << 2),
    RAF_EMCF_EN = (1 << 12),
    RAF_NEWFUNC_EN = (1 << 11)
};

#define R_IS       (0x00C / 4)
enum {
    IS_HARD_ACCESS_COMPLETE = 1,
    IS_AUTONEG = (1 << 1),
    IS_RX_COMPLETE = (1 << 2),
    IS_RX_REJECT = (1 << 3),
    IS_TX_COMPLETE = (1 << 5),
    IS_RX_DCM_LOCK = (1 << 6),
    IS_MGM_RDY = (1 << 7),
    IS_PHY_RST_DONE = (1 << 8),
};

#define R_IP       (0x010 / 4)
#define R_IE       (0x014 / 4)
#define R_UAWL     (0x020 / 4)
#define R_UAWU     (0x024 / 4)
#define R_PPST     (0x030 / 4)
enum {
    PPST_LINKSTATUS = (1 << 0),
    PPST_PHY_LINKSTATUS = (1 << 7),
};

#define R_STATS_RX_BYTESL   (0x200 / 4)
#define R_STATS_RX_BYTESH   (0x204 / 4)
#define R_STATS_TX_BYTESL   (0x208 / 4)
#define R_STATS_TX_BYTESH   (0x20C / 4)
#define R_STATS_RXL         (0x290 / 4)
#define R_STATS_RXH         (0x294 / 4)
#define R_STATS_RX_BCASTL   (0x2a0 / 4)
#define R_STATS_RX_BCASTH   (0x2a4 / 4)
#define R_STATS_RX_MCASTL   (0x2a8 / 4)
#define R_STATS_RX_MCASTH   (0x2ac / 4)

#define R_RCW0     (0x400 / 4)
#define R_RCW1     (0x404 / 4)
enum {
    RCW1_VLAN = (1 << 27),
    RCW1_RX   = (1 << 28),
    RCW1_FCS  = (1 << 29),
    RCW1_JUM  = (1 << 30),
    RCW1_RST  = (1 << 31),
};

#define R_TC       (0x408 / 4)
enum {
    TC_VLAN = (1 << 27),
    TC_TX   = (1 << 28),
    TC_FCS  = (1 << 29),
    TC_JUM  = (1 << 30),
    TC_RST  = (1 << 31),
};

#define R_EMMC     (0x410 / 4)
enum {
    EMMC_LINKSPEED_10MB   = (0 << 30),
    EMMC_LINKSPEED_100MB  = (1 << 30),
    EMMC_LINKSPEED_1000MB = (2 << 30),
};

#define R_PHYC     (0x414 / 4)

#define R_MC       (0x500 / 4)
#define MC_EN      (1 << 6)

#define R_MCR      (0x504 / 4)
#define R_MWD      (0x508 / 4)
#define R_MRD      (0x50c / 4)

#define R_MIS      (0x600 / 4)
#define R_MIP      (0x620 / 4)
#define R_MIE      (0x640 / 4)
#define R_MIC      (0x640 / 4)

#define R_UAW0     (0x700 / 4)
#define R_UAW1     (0x704 / 4)
#define R_FMI      (0x708 / 4)
#define R_AF0      (0x710 / 4)
#define R_AF1      (0x714 / 4)
#define R_MAX      (0x34 / 4)
/* Indirect registers. */
struct TEMAC {
    struct MDIOBus mdio_bus;
    struct PHY phy;

    void *parent;
};

typedef struct XilinxAXIEnetStreamSlave XilinxAXIEnetStreamSlave;
typedef struct XilinxAXIEnet XilinxAXIEnet;

struct XilinxAXIEnetStreamSlave {
    Object parent;

    struct XilinxAXIEnet *enet;
};
struct XilinxAXIEnet {
    SysBusDevice busdev;
    MemoryRegion iomem;
    qemu_irq irq;
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    XilinxAXIEnetStreamSlave rx_data_dev;
    XilinxAXIEnetStreamSlave rx_control_dev;
    NICState *nic;
    NICConf conf;

    uint32_t c_rxmem;
    uint32_t c_txmem;
    uint32_t c_phyaddr;

    struct TEMAC TEMAC;

    /* MII regs. */
    union {
        uint32_t regs[4];
        struct {
            uint32_t mc;
            uint32_t mcr;
            uint32_t mwd;
            uint32_t mrd;
        };
    } mii;

    struct {
        uint64_t rx_bytes;
        uint64_t tx_bytes;

        uint64_t rx;
        uint64_t rx_bcast;
        uint64_t rx_mcast;
    } stats;

    /* Receive configuration words. */
    uint32_t rcw[2];
    /* Transmit config. */
    uint32_t tc;
    uint32_t emmc;
    uint32_t phyc;

    /* Unicast Address Word. */
    uint32_t uaw[2];
    /* Unicast address filter used with extended mcast. */
    uint32_t ext_uaw[2];
    uint32_t fmi;

    uint32_t regs[R_MAX];

    /* Multicast filter addrs. */
    uint32_t maddr[4][2];
    /* 32K x 1 lookup filter. */
    uint32_t ext_mtable[1024];

    uint32_t hdr[CONTROL_PAYLOAD_WORDS];

    uint8_t *rxmem;
    uint32_t rxsize;
    uint32_t rxpos;

    uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
    uint32_t rxappsize;

    /* Whether axienet_eth_rx_notify should flush incoming queue. */
    bool need_flush;
};
static void axienet_rx_reset(XilinxAXIEnet *s)
{
    s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
}

static void axienet_tx_reset(XilinxAXIEnet *s)
{
    s->tc = TC_JUM | TC_TX | TC_VLAN;
}

static inline int axienet_rx_resetting(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RST;
}

static inline int axienet_rx_enabled(XilinxAXIEnet *s)
{
    return s->rcw[1] & RCW1_RX;
}

static inline int axienet_extmcf_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_EMCF_EN);
}

static inline int axienet_newfunc_enabled(XilinxAXIEnet *s)
{
    return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
}

static void xilinx_axienet_reset(DeviceState *d)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(d);

    axienet_rx_reset(s);
    axienet_tx_reset(s);

    s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
    s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;

    s->emmc = EMMC_LINKSPEED_100MB;
}

static void enet_update_irq(XilinxAXIEnet *s)
{
    s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
    qemu_set_irq(s->irq, !!s->regs[R_IP]);
}
static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    uint32_t r = 0;
    addr >>= 2;

    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        r = s->rcw[addr & 1];
        break;

    case R_TC:
        r = s->tc;
        break;

    case R_EMMC:
        r = s->emmc;
        break;

    case R_PHYC:
        r = s->phyc;
        break;

    case R_MCR:
        r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
        break;

    case R_STATS_RX_BYTESL:
    case R_STATS_RX_BYTESH:
        r = s->stats.rx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_TX_BYTESL:
    case R_STATS_TX_BYTESH:
        r = s->stats.tx_bytes >> (32 * (addr & 1));
        break;

    case R_STATS_RXL:
    case R_STATS_RXH:
        r = s->stats.rx >> (32 * (addr & 1));
        break;
    case R_STATS_RX_BCASTL:
    case R_STATS_RX_BCASTH:
        r = s->stats.rx_bcast >> (32 * (addr & 1));
        break;
    case R_STATS_RX_MCASTL:
    case R_STATS_RX_MCASTH:
        r = s->stats.rx_mcast >> (32 * (addr & 1));
        break;

    case R_MC:
    case R_MWD:
    case R_MRD:
        r = s->mii.regs[addr & 3];
        break;

    case R_UAW0:
    case R_UAW1:
        r = s->uaw[addr & 1];
        break;

    case R_UAWU:
    case R_UAWL:
        r = s->ext_uaw[addr & 1];
        break;

    case R_FMI:
        r = s->fmi;
        break;

    case R_AF0:
    case R_AF1:
        r = s->maddr[s->fmi & 3][addr & 1];
        break;

    case 0x8000 ... 0x83ff:
        r = s->ext_mtable[addr - 0x8000];
        break;

    default:
        if (addr < ARRAY_SIZE(s->regs)) {
            r = s->regs[addr];
        }
        DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, addr * 4, r));
        break;
    }
    return r;
}
static void enet_write(void *opaque, hwaddr addr,
                       uint64_t value, unsigned size)
{
    XilinxAXIEnet *s = opaque;
    struct TEMAC *t = &s->TEMAC;

    addr >>= 2;
    switch (addr) {
    case R_RCW0:
    case R_RCW1:
        s->rcw[addr & 1] = value;
        if ((addr & 1) && value & RCW1_RST) {
            axienet_rx_reset(s);
        } else {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;

    case R_TC:
        s->tc = value;
        if (value & TC_RST) {
            axienet_tx_reset(s);
        }
        break;

    case R_EMMC:
        s->emmc = value;
        break;

    case R_PHYC:
        s->phyc = value;
        break;

    case R_MC:
        value &= ((1 << 7) - 1);

        /* Enable the MII. */
        if (value & MC_EN) {
            unsigned int miiclkdiv = value & ((1 << 6) - 1);
            if (!miiclkdiv) {
                qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
            }
        }
        s->mii.mc = value;
        break;

    case R_MCR: {
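        /*
         * MDIO control word layout, as implied by the shifts and masks
         * decoded below (derived from this code, not from a datasheet):
         *   bits 28:24  PHY address
         *   bits 20:16  PHY register address
         *   bits 15:14  operation (1 = write, 2 = read)
         *   bit  11     initiate access
         */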
        unsigned int phyaddr = (value >> 24) & 0x1f;
        unsigned int regaddr = (value >> 16) & 0x1f;
        unsigned int op = (value >> 14) & 3;
        unsigned int initiate = (value >> 11) & 1;

        if (initiate) {
            if (op == 1) {
                mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
            } else if (op == 2) {
                s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
            } else {
                qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
            }
        }
        s->mii.mcr = value;
        break;
    }

    case R_MWD:
    case R_MRD:
        s->mii.regs[addr & 3] = value;
        break;

    case R_UAW0:
    case R_UAW1:
        s->uaw[addr & 1] = value;
        break;

    case R_UAWL:
    case R_UAWU:
        s->ext_uaw[addr & 1] = value;
        break;

    case R_FMI:
        s->fmi = value;
        break;

    case R_AF0:
    case R_AF1:
        s->maddr[s->fmi & 3][addr & 1] = value;
        break;

    case R_IS:
        s->regs[addr] &= ~value;
        break;

    case 0x8000 ... 0x83ff:
        s->ext_mtable[addr - 0x8000] = value;
        break;

    default:
        DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, addr * 4, (unsigned)value));
        if (addr < ARRAY_SIZE(s->regs)) {
            s->regs[addr] = value;
        }
        break;
    }
    enet_update_irq(s);
}
static const MemoryRegionOps enet_ops = {
    .read = enet_read,
    .write = enet_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static int eth_can_rx(XilinxAXIEnet *s)
{
    /* RX enabled? */
    return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
}

static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
{
    int match = 1;

    if (memcmp(buf, &f0, 4)) {
        match = 0;
    }

    if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
        match = 0;
    }

    return match;
}

static void axienet_eth_rx_notify(void *opaque)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);

    while (s->rxappsize && stream_can_push(s->tx_control_dev,
                                           axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_control_dev,
                                 (void *)s->rxapp + CONTROL_PAYLOAD_SIZE
                                 - s->rxappsize, s->rxappsize);
        s->rxappsize -= ret;
    }

    while (s->rxsize && stream_can_push(s->tx_data_dev,
                                        axienet_eth_rx_notify, s)) {
        size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
                                 s->rxsize);
        s->rxsize -= ret;
        s->rxpos += ret;
        if (!s->rxsize) {
            s->regs[R_IS] |= IS_RX_COMPLETE;
            if (s->need_flush) {
                s->need_flush = false;
                qemu_flush_queued_packets(qemu_get_queue(s->nic));
            }
        }
    }
    enet_update_irq(s);
}
static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
    XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
    static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
                                              0xff, 0xff, 0xff};
    static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
    uint32_t app[CONTROL_PAYLOAD_WORDS] = {0};
    int promisc = s->fmi & (1 << 31);
    int unicast, broadcast, multicast, ip_multicast = 0;
    uint32_t csum32;
    uint16_t csum16;
    int i;

    DENET(qemu_log("%s: %zd bytes\n", __func__, size));

    if (!eth_can_rx(s)) {
        s->need_flush = true;
        return 0;
    }

    unicast = ~buf[0] & 0x1;
    broadcast = memcmp(buf, sa_bcast, 6) == 0;
    multicast = !unicast && !broadcast;
    if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
        ip_multicast = 1;
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->rcw[1] & RCW1_JUM)) {
        if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
            return size;
        }
    }

    /* Basic Address filters. If you want to use the extended filters
       you'll generally have to place the ethernet mac into promiscuous mode
       to avoid the basic filtering from dropping most frames. */
    if (!promisc) {
        if (unicast) {
            if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int drop = 1;

                /* Multicast. */
                if (s->regs[R_RAF] & RAF_MCAST_REJ) {
                    return size;
                }

                for (i = 0; i < 4; i++) {
                    if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
                        drop = 0;
                        break;
                    }
                }

                if (drop) {
                    return size;
                }
            }
        }
    }

    /* Extended mcast filtering enabled? */
    if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
        if (unicast) {
            if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
                return size;
            }
        } else {
            if (broadcast) {
                /* Broadcast. ??? */
                if (s->regs[R_RAF] & RAF_BCAST_REJ) {
                    return size;
                }
            } else {
                int idx, bit;

                /* Multicast. */
                if (!memcmp(buf, sa_ipmcast, 3)) {
                    return size;
                }

                idx = (buf[4] & 0x7f) << 8;
                idx |= buf[5];

                bit = 1 << (idx & 0x1f);
                idx >>= 5;

                if (!(s->ext_mtable[idx] & bit)) {
                    return size;
                }
            }
        }
    }

    if (size < 12) {
        s->regs[R_IS] |= IS_RX_REJECT;
        enet_update_irq(s);
        return -1;
    }

    if (size > (s->c_rxmem - 4)) {
        size = s->c_rxmem - 4;
    }

    memcpy(s->rxmem, buf, size);
    memset(s->rxmem + size, 0, 4); /* Clear the FCS. */

    if (s->rcw[1] & RCW1_FCS) {
        size += 4; /* fcs is inband. */
    }

    app[0] = 5 << 28;
    csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
    /* Fold it once. */
    csum32 = (csum32 & 0xffff) + (csum32 >> 16);
    /* And twice to get rid of possible carries. */
    csum16 = (csum32 & 0xffff) + (csum32 >> 16);
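    /*
     * Worked example of the two folds above (illustrative values only):
     * a 32-bit partial sum of 0x0001a0f3 folds to 0xa0f3 + 0x0001 = 0xa0f4
     * on the first pass, and the second pass absorbs any carry the first
     * fold itself can produce (the per-pass sum never exceeds 0x1fffe).
     */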
    app[3] = csum16;
    app[4] = size & 0xffff;

    s->stats.rx_bytes += size;
    s->stats.rx++;
    if (multicast) {
        s->stats.rx_mcast++;
        app[2] |= 1 | (ip_multicast << 1);
    } else if (broadcast) {
        s->stats.rx_bcast++;
        app[2] |= 1 << 3;
    }

    /* Good frame. */
    app[2] |= 1 << 6;

    s->rxsize = size;
    s->rxpos = 0;
    for (i = 0; i < ARRAY_SIZE(app); ++i) {
        app[i] = cpu_to_le32(app[i]);
    }
    s->rxappsize = CONTROL_PAYLOAD_SIZE;
    memcpy(s->rxapp, app, s->rxappsize);
    axienet_eth_rx_notify(s);

    enet_update_irq(s);
    return size;
}
static size_t
xilinx_axienet_control_stream_push(StreamSlave *obj, uint8_t *buf, size_t len)
{
    int i;
    XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj);
    XilinxAXIEnet *s = cs->enet;

    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI Enet requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->hdr, buf, len);

    for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) {
        s->hdr[i] = le32_to_cpu(s->hdr[i]);
    }
    return len;
}

static size_t
xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size)
{
    XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(obj);
    XilinxAXIEnet *s = ds->enet;

    /* TX enable ? */
    if (!(s->tc & TC_TX)) {
        return size;
    }

    /* Jumbo or vlan sizes ? */
    if (!(s->tc & TC_JUM)) {
        if (size > 1518 && size <= 1522 && !(s->tc & TC_VLAN)) {
            return size;
        }
    }

    if (s->hdr[0] & 1) {
        unsigned int start_off = s->hdr[1] >> 16;
        unsigned int write_off = s->hdr[1] & 0xffff;
        uint32_t tmp_csum;
        uint16_t csum;

        tmp_csum = net_checksum_add(size - start_off,
                                    (uint8_t *)buf + start_off);
        /* Accumulate the seed. */
        tmp_csum += s->hdr[2] & 0xffff;

        /* Fold the 32bit partial checksum. */
        csum = net_checksum_finish(tmp_csum);

        /* Writeback. */
        buf[write_off] = csum >> 8;
        buf[write_off + 1] = csum & 0xff;
    }

    qemu_send_packet(qemu_get_queue(s->nic), buf, size);

    s->stats.tx_bytes += size;
    s->regs[R_IS] |= IS_TX_COMPLETE;
    enet_update_irq(s);

    return size;
}
static NetClientInfo net_xilinx_enet_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .receive = eth_rx,
};
static void xilinx_enet_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
    XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    Error *local_err = NULL;

    object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
                             (Object **) &ds->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &local_err);
    object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
                             (Object **) &cs->enet,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &local_err);
    if (local_err) {
        goto xilinx_enet_realize_fail;
    }
    object_property_set_link(OBJECT(ds), OBJECT(s), "enet", &local_err);
    object_property_set_link(OBJECT(cs), OBJECT(s), "enet", &local_err);
    if (local_err) {
        goto xilinx_enet_realize_fail;
    }

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);

    tdk_init(&s->TEMAC.phy);
    mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);

    s->TEMAC.parent = s;

    s->rxmem = g_malloc(s->c_rxmem);
    return;

xilinx_enet_realize_fail:
    if (!*errp) {
        *errp = local_err;
    }
}
static void xilinx_enet_init(Object *obj)
{
    XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
                             (Object **) &s->tx_data_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    object_property_add_link(obj, "axistream-control-connected",
                             TYPE_STREAM_SLAVE,
                             (Object **) &s->tx_control_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);

    object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
                      TYPE_XILINX_AXI_ENET_DATA_STREAM);
    object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
                      TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
    object_property_add_child(OBJECT(s), "axistream-connected-target",
                              (Object *)&s->rx_data_dev, &error_abort);
    object_property_add_child(OBJECT(s), "axistream-control-connected-target",
                              (Object *)&s->rx_control_dev, &error_abort);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
    sysbus_init_mmio(sbd, &s->iomem);
}
static Property xilinx_enet_properties[] = {
    DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
    DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
    DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
    DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void xilinx_enet_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_enet_realize;
    dc->props = xilinx_enet_properties;
    dc->reset = xilinx_axienet_reset;
}

static void xilinx_enet_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = data;
}
static const TypeInfo xilinx_enet_info = {
    .name          = TYPE_XILINX_AXI_ENET,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIEnet),
    .class_init    = xilinx_enet_class_init,
    .instance_init = xilinx_enet_init,
};

static const TypeInfo xilinx_enet_data_stream_info = {
    .name          = TYPE_XILINX_AXI_ENET_DATA_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
    .class_init    = xilinx_enet_stream_class_init,
    .class_data    = xilinx_axienet_data_stream_push,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static const TypeInfo xilinx_enet_control_stream_info = {
    .name          = TYPE_XILINX_AXI_ENET_CONTROL_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
    .class_init    = xilinx_enet_stream_class_init,
    .class_data    = xilinx_axienet_control_stream_push,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_enet_register_types(void)
{
    type_register_static(&xilinx_enet_info);
    type_register_static(&xilinx_enet_data_stream_info);
    type_register_static(&xilinx_enet_control_stream_info);
}

type_init(xilinx_enet_register_types)
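
/*
 * Board wiring sketch (illustrative only, not part of this model): a board
 * typically pairs this MAC with an AXI DMA, points the MAC's
 * "axistream-connected" / "axistream-control-connected" links at the DMA's
 * stream slaves, and lets the DMA link back to the
 * "axistream-connected-target" children created in xilinx_enet_init() above.
 * The base address and IRQ index below are placeholders; see
 * hw/microblaze/petalogix_ml605_mmu.c for a real board doing this.
 *
 *     DeviceState *dev = qdev_create(NULL, "xlnx.axi-ethernet");
 *     Object *ds = object_property_get_link(OBJECT(dma),
 *                                           "axistream-connected-target",
 *                                           NULL);
 *     Object *cs = object_property_get_link(OBJECT(dma),
 *                                           "axistream-control-connected-target",
 *                                           NULL);
 *
 *     qdev_set_nic_properties(dev, &nd_table[0]);
 *     object_property_set_link(OBJECT(dev), ds, "axistream-connected",
 *                              &error_abort);
 *     object_property_set_link(OBJECT(dev), cs, "axistream-control-connected",
 *                              &error_abort);
 *     qdev_init_nofail(dev);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, AXIENET_BASEADDR);
 *     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[AXIENET_IRQ]);
 */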