/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include <zlib.h> /* for crc32 */

#define DEBUG_IMX_FEC 0

#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#define DEBUG_IMX_PHY 0

#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];

    sprintf(tmp, "index %d", index);

    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    /* ... names of the ENET-only registers ... */
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    /* ... names of the registers common to FEC and ENET ... */
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        }
        return imx_enet_reg_name(s, index);
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

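/*
 * Bits in the emulated PHY's interrupt source (register 29) and interrupt
 * mask (register 30) registers.
 */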
#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void phy_reset(IMXFECState *s)
{
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;

    phy_update_link(s);
}

static uint32_t do_phy_read(IMXFECState *s, int reg)
{
    uint32_t val = 0;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
    case 6:     /* Auto-neg Expansion */
        /* ... */
        break;
    case 29:    /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

    return val;
}

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

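/*
 * Buffer descriptors live in guest memory; read and write them through the
 * system DMA address space.
 */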
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

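/* Recompute the state of both interrupt output lines from EIR and EIMR. */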
static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment do not.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

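/*
 * Walk the TX descriptor ring, gathering buffers into s->frame and handing
 * each completed frame to the network backend.
 */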
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

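/*
 * ENET variant of the TX path: same descriptor walk as imx_fec_do_tx, but
 * with per-ring TDAR/TDSR selection and optional IP checksum insertion.
 */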
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;
    size_t ring;
    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;

    switch (index) {
    case ENET_TDAR:
        ring    = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr    = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring    = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr    = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring    = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr    = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

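/*
 * Re-evaluate RDAR from the RX descriptor at the head of the ring and, if
 * requested, flush packets that were queued while the ring was full.
 */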
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        FEC_PRINTF("RX buffer full\n");
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    phy_reset(s);
}

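/*
 * Register read helpers: the generic MMIO read handles registers common to
 * both controllers and dispatches to the FEC- or ENET-specific helper for
 * the variant-only ones.
 */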
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    /* ... the ENET-only registers simply read back the stored value ... */
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    /* ... the registers common to FEC and ENET read back the stored value ... */
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               (uint32_t)value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           do_phy_read(s,
                                                       extract32(value,
                                                                 18, 10)));
        } else {
            /* This a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        break;
    }

    imx_eth_update(s);
}

static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return !!s->regs[ENET_RDAR];
}

/*
 * FEC receive path: copy the incoming frame (plus trailing CRC) into the
 * buffers described by the RX descriptor ring.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

/*
 * ENET receive path: same descriptor walk as imx_fec_receive, with optional
 * two-byte SHIFT16 padding so the guest sees a 4-byte aligned IP payload.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len  -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

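/*
 * Generic receive entry point: dispatch to the ENET or FEC receive path
 * depending on the controller variant and whether 1588 support is enabled.
 */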
static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read                  = imx_eth_read,
    .write                 = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness            = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type                = NET_CLIENT_DRIVER_NIC,
    .size                = sizeof(NICState),
    .can_receive         = imx_eth_can_receive,
    .receive             = imx_eth_receive,
    .cleanup             = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};

static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name          = TYPE_IMX_FEC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init    = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name          = TYPE_IMX_ENET,
    .parent        = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)
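
/*
 * Usage sketch (illustrative only, not taken from any board model): a
 * machine would typically create and wire one of these controllers roughly
 * as follows. The MMIO base address, IRQ numbers and the "gic" variable are
 * assumptions made for the sake of the example.
 *
 *   DeviceState *dev = qdev_new(TYPE_IMX_ENET);
 *   qdev_prop_set_uint32(dev, "tx-ring-num", 3);
 *   qdev_set_nic_properties(dev, &nd_table[0]);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x02188000);
 *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(gic, 150));
 *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, qdev_get_gpio_in(gic, 151));
 */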