/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/net/imx_fec.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "net/checksum.h"
#include "net/eth.h"

/* For crc32 */
#include <zlib.h>

#ifndef DEBUG_IMX_FEC
#define DEBUG_IMX_FEC 0
#endif

#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#ifndef DEBUG_IMX_PHY
#define DEBUG_IMX_PHY 0
#endif

#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

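/*
 * Interrupt source/mask bits of the emulated PHY.  They are reported
 * through the vendor-specific MII registers 29 (source) and 30 (mask)
 * handled in do_phy_read()/do_phy_write() below.
 */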
#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status.  */
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void phy_reset(IMXFECState *s)
{
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    phy_update_link(s);
}

static uint32_t do_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 2:     /* ID1 */
        val = 0x0007;
        break;
    case 3:     /* ID2 */
        val = 0xc0d1;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-neg Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source.  */
        val = s->phy_int;
        s->phy_int = 0;
        phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

    return val;
}

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately.  */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

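/*
 * Recompute the two interrupt lines from EIR/EIMR: irq[1] follows the
 * timestamp timer event bit (ENET_INT_TS_TIMER), irq[0] follows the
 * remaining MAC event bits (ENET_INT_MAC).
 */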
static void imx_eth_update(IMXFECState *s)
{
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_TS_TIMER) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit.  */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame.  */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor.  */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring    = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr    = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring    = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr    = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring    = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr    = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit.  */
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame.  */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor.  */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

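/*
 * Pick the TX implementation: the enhanced (ENET) descriptor layout is
 * used when this is not a plain FEC and 1588 support is enabled in ECR,
 * otherwise fall back to the legacy FEC descriptors.
 */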
static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

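/*
 * RDAR is kept set only while the current RX descriptor is still empty;
 * imx_eth_can_receive() relies on it to throttle incoming packets.
 */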
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;
    bool rx_ring_full;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    rx_ring_full = !(bd.flags & ENET_BD_E);

    if (rx_ring_full) {
        FEC_PRINTF("RX buffer full\n");
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }

    s->regs[ENET_RDAR] = rx_ring_full ? 0 : ENET_RDAR_RDAR;
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        value = value & 0x0000000f;
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        value = value & 0x000000fd;
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               (uint32_t)value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                s->regs[index] = ENET_RDAR_RDAR;
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:    /* FALLTHROUGH */
    case ENET_TDAR2:    /* FALLTHROUGH */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
    case ENET_TDAR:     /* FALLTHROUGH */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
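    /*
     * An MMFR write starts one MII management frame: bit 29 set selects
     * a read, bits 27..18 carry the PHY/register address passed to
     * do_phy_read()/do_phy_write(), and bits 15..0 carry the data.
     * The MII interrupt is raised immediately because the emulated
     * transfer completes instantly.
     */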
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           do_phy_read(s,
                                                       extract32(value,
                                                                 18, 10)));
        } else {
            /* This is a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB.  */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately.  */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering.  */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static int imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    FEC_PRINTF("\n");

    return !!s->regs[ENET_RDAR];
}

static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC.  */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated.  */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out.  */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC.  */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated.  */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available.  Bail out.  */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len  -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read                  = imx_eth_read,
    .write                 = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness            = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type                = NET_CLIENT_DRIVER_NIC,
    .size                = sizeof(NICState),
    .can_receive         = imx_eth_can_receive,
    .receive             = imx_eth_receive,
    .cleanup             = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};

static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          DEVICE(dev)->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd    = &vmstate_imx_eth;
    dc->reset   = imx_eth_reset;
    dc->props   = imx_eth_properties;
    dc->realize = imx_eth_realize;
    dc->desc    = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name          = TYPE_IMX_FEC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init    = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name          = TYPE_IMX_ENET,
    .parent        = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)