Merge tag 'pull-nvme-20241001' of https://gitlab.com/birkelund/qemu into staging
[qemu/armbru.git] / hw / net / imx_fec.c
blob6294d292023b960d2b0b3c98898eecd06dcab97e
1 /*
2 * i.MX Fast Ethernet Controller emulation.
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
6 * Based on Coldfire Fast Ethernet Controller emulation.
8 * Copyright (c) 2007 CodeSourcery.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
24 #include "qemu/osdep.h"
25 #include "hw/irq.h"
26 #include "hw/net/imx_fec.h"
27 #include "hw/qdev-properties.h"
28 #include "migration/vmstate.h"
29 #include "sysemu/dma.h"
30 #include "qemu/log.h"
31 #include "qemu/module.h"
32 #include "net/checksum.h"
33 #include "net/eth.h"
34 #include "trace.h"
36 #include <zlib.h> /* for crc32 */
/* Safety bound: process at most this many buffer descriptors per TX kick,
 * so a guest-created descriptor loop cannot hang QEMU. */
#define IMX_MAX_DESC    1024
40 static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
42 static char tmp[20];
43 snprintf(tmp, sizeof(tmp), "index %d", index);
44 return tmp;
/* Name of an FEC-only register, falling back to the generic "index N". */
static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}
/* Name of an ENET-only register, falling back to the generic "index N". */
static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}
/*
 * Name of a register common to FEC and ENET; unknown indices are
 * dispatched to the variant-specific helper based on s->is_fec.
 */
static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}
/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    /* True when the "tx-ring-num" property requested multiple TX rings. */
    return s->tx_ring_num > 1;
}
/*
 * Migration subsection holding TX descriptors 1 and 2; only sent when
 * the device actually has multiple TX rings (see needed callback).
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
/* Main migration state: register file, ring pointers and PHY state. */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
/* Bits of the PHY interrupt source (reg 29) / mask (reg 30) registers. */
#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);
/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}
246 static void imx_phy_update_link(IMXFECState *s)
248 /* Autonegotiation status mirrors link status. */
249 if (qemu_get_queue(s->nic)->link_down) {
250 trace_imx_phy_update_link("down");
251 s->phy_status &= ~0x0024;
252 s->phy_int |= PHY_INT_DOWN;
253 } else {
254 trace_imx_phy_update_link("up");
255 s->phy_status |= 0x0024;
256 s->phy_int |= PHY_INT_ENERGYON;
257 s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
259 imx_phy_update_irq(s);
262 static void imx_eth_set_link(NetClientState *nc)
264 imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
/*
 * Reset the emulated PHY to its power-on defaults and refresh the
 * link-dependent status bits.
 */
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}
/*
 * Read one MII management register.
 *
 * "reg" packs the PHY address (reg / 32) and register number (reg % 32).
 * Accesses to a PHY address other than ours may be forwarded to the
 * linked phy-consumer device; otherwise 0xffff is returned, as seen on
 * a management bus where no PHY responds.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            s = s->phy_consumer;
        } else {
            trace_imx_phy_read_num(phy, s->phy_num);
            return 0xffff;
        }
    }

    reg %= 32;

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        /* Reading the interrupt source register clears it. */
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}
349 static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
351 uint32_t phy = reg / 32;
353 if (!s->phy_connected) {
354 return;
357 if (phy != s->phy_num) {
358 if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
359 s = s->phy_consumer;
360 } else {
361 trace_imx_phy_write_num(phy, s->phy_num);
362 return;
366 reg %= 32;
368 trace_imx_phy_write(val, phy, reg);
370 switch (reg) {
371 case 0: /* Basic Control */
372 if (val & 0x8000) {
373 imx_phy_reset(s);
374 } else {
375 s->phy_control = val & 0x7980;
376 /* Complete autonegotiation immediately. */
377 if (val & 0x1000) {
378 s->phy_status |= 0x0020;
381 break;
382 case 4: /* Auto-neg advertisement */
383 s->phy_advertise = (val & 0x2d7f) | 0x80;
384 break;
385 case 30: /* Interrupt mask */
386 s->phy_int_mask = val & 0xff;
387 imx_phy_update_irq(s);
388 break;
389 case 17:
390 case 18:
391 case 27:
392 case 31:
393 qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
394 TYPE_IMX_FEC, __func__, reg);
395 break;
396 default:
397 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
398 TYPE_IMX_FEC, __func__, reg);
399 break;
/* DMA-read a legacy (FEC) buffer descriptor from guest memory. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
/* DMA-write a legacy (FEC) buffer descriptor back to guest memory. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
/* DMA-read an enhanced (ENET) buffer descriptor from guest memory. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
/* DMA-write an enhanced (ENET) buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
/* Recompute both output IRQ lines from EIR & EIMR. */
static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}
/*
 * Transmit pending frames from the single legacy (FEC) TX ring.
 *
 * Walks descriptors from tx_descriptor[0], gathering buffers into
 * s->frame until a descriptor with ENET_BD_L (last) is found, then sends
 * the frame. Stops at the first descriptor not owned by hardware
 * (ENET_BD_R clear) or after IMX_MAX_DESC descriptors (loop guard).
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        /* Oversized frames are truncated and flagged as babbling. */
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: go back to the start of the ring. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
/*
 * Transmit pending frames from one of the three enhanced (ENET) TX rings.
 *
 * "index" is the TDAR/TDAR1/TDAR2 register index that was written and
 * selects the ring, its interrupt bits and its start-address register.
 * Supports per-descriptor checksum insertion (PINS/IINS options) and the
 * BDU "descriptor updated" status bit of the enhanced descriptor format.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        /* Callers only pass the three TDAR indices; anything else is a
         * QEMU-internal logic error, hence abort() rather than a guest
         * error. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */

            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        /* Oversized frames are truncated and flagged as babbling. */
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Insert protocol and/or IP checksums if requested. */
            int csum = 0;

            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: go back to the start of the ring. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
626 static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
628 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
629 imx_enet_do_tx(s, index);
630 } else {
631 imx_fec_do_tx(s);
/*
 * Refresh RDAR from the current RX descriptor's empty bit and, when RX
 * just became possible and "flush" is set, replay any packets the net
 * layer queued while we could not receive.
 */
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
/*
 * Device reset: restore register defaults (FEC- or ENET-specific), seed
 * the MAC-address registers from the NIC config, clear the ring pointers
 * and reset the PHY.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}
/* Read of an unknown register: log a guest error and return 0. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
/* Read a register that only exists on the FEC variant. */
static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
/* Read a register that only exists on the ENET variant. */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
/*
 * MMIO read handler: registers common to both variants are served
 * directly; everything else is dispatched by s->is_fec.
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
790 static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
792 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
793 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
794 return;
/* Write a register that only exists on the FEC variant. */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Writable bits [9:2]; bit 10 always reads as set. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Setting the enable bit also latches the "ready" status bit. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * Write a register that only exists on the ENET variant. Masks keep
 * reserved bits at zero; timer flag/status registers use W1C semantics.
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * MMIO write handler for registers common to both variants.
 *
 * Side effects: writing RDAR/TDAR kicks RX/TX processing, ECR.RESET
 * resets the whole device, MMFR performs an MII PHY access, and
 * PALR/PAUR update the NIC's MAC address. Variant-specific registers
 * fall through to imx_fec_write()/imx_enet_write(). All paths except
 * the default case end by re-evaluating the IRQ lines.
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt events are write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* Disabling the MAC rewinds all ring pointers. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB.  */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately.  */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Keep the NIC's notion of its MAC address in sync. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering.  */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Ring bases are 4-byte (FEC) / 8-byte (ENET) aligned. */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
1058 static bool imx_eth_can_receive(NetClientState *nc)
1060 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1062 return !!s->regs[ENET_RDAR];
/*
 * Receive a frame into the legacy (FEC) RX ring.
 *
 * Appends the Ethernet FCS (CRC32) to the frame, scatters it across RX
 * descriptors (each at most ENET_MRBR bytes), sets truncation/length
 * error flags, and raises RXB/RXF interrupts. Returns "len" (frame
 * consumed) or 0 when no descriptor was available.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Tail of the descriptor holds (part of) the CRC. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
/*
 * Receive a frame into the enhanced (ENET) RX ring.
 *
 * Like imx_fec_receive() but uses enhanced descriptors, honors the FTRL
 * truncation limit, supports the RACC SHIFT16 option (two zero padding
 * bytes before the frame to 4-byte-align the IP payload), sets the BDU
 * status bit and gates RXB/RXF interrupts on the per-descriptor RX_INT
 * option.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Tail of the descriptor holds (part of) the CRC. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
1284 static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1285 size_t len)
1287 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1289 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1290 return imx_enet_receive(nc, buf, len);
1291 } else {
1292 return imx_fec_receive(nc, buf, len);
/* MMIO ops: the register file only accepts aligned 32-bit accesses. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* NetClientInfo callback: drop our NIC reference when the peer goes away. */
static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}
/* Callbacks wiring this device into QEMU's network layer. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
/*
 * Realize: map the MMIO region, export the two IRQ lines, pick a MAC
 * address if the user did not set one, and create the NIC backend.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
/* User-configurable properties: NIC config, ring count and PHY wiring. */
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class init shared by the FEC and ENET device types. */
static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    device_class_set_legacy_reset(dc, imx_eth_reset);
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}
/* Instance init for the FEC variant. */
static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}
/* Instance init for the ENET variant. */
static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}
/* Base type: the FEC device; ENET below derives from it. */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
/* ENET variant: inherits everything from FEC, only flips is_fec. */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
/* Register both device types with the QOM type system. */
static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)