/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "trace.h"

#include <zlib.h> /* for crc32() */

#define IMX_MAX_DESC    1024

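/*
 * Register-name helpers used by the trace points below: the FEC and ENET
 * variants name their model-specific registers and fall back to a generic
 * "index %d" string for anything they do not recognise.
 */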
static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];

    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    return imx_default_reg_name(s, index);
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    if (s->is_fec) {
        return imx_fec_reg_name(s, index);
    }
    return imx_enet_reg_name(s, index);
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

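/*
 * Bit definitions for the emulated PHY's interrupt source (reg 29) and
 * interrupt mask (reg 30) registers.
 */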
#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

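/*
 * Mirror QEMU's link state into the PHY basic status register.  The 0x0024
 * mask covers bit 2 (link status) and bit 5 (auto-negotiation complete) of
 * the standard MII status register layout.
 */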
static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }

    imx_phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

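/*
 * Reset the PHY to plausible power-on defaults: status 0x7809 (10/100
 * half/full capable, auto-negotiation able), control 0x3000 (100 Mb/s,
 * auto-negotiation enabled) and advertisement 0x01e1 (IEEE 802.3 selector
 * plus 10/100 half/full), per the standard MII register layout.
 */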
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;

    imx_phy_update_link(s);
}

static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-neg Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, reg);

    return val;
}

static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    trace_imx_phy_write(val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            imx_phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        imx_phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

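/*
 * Buffer descriptors live in guest memory; these helpers copy a descriptor
 * between guest RAM and a host-side structure over the system DMA address
 * space, tracing the interesting fields on the read path.
 */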
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     *
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

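/*
 * Walk the FEC TX descriptor ring, gathering the buffers of a frame into
 * s->frame and handing the completed frame to the network backend when a
 * descriptor marked "last in frame" is reached.
 */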
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

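/*
 * ENET variant of the TX path: the same descriptor walk as the FEC, plus
 * optional protocol/IP checksum insertion (ENET_BD_PINS / ENET_BD_IINS) and
 * per-ring completion flags, selected by which TDAR register was written
 * (ring 0, 1 or 2).
 */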
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring    = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr    = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring    = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr    = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring    = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr    = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {

            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();

            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

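/*
 * Re-check whether the current RX descriptor is empty.  RDAR stays set as
 * long as a descriptor is available; once one is, optionally flush packets
 * that the backend queued while reception was blocked.
 */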
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

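/*
 * Device reset: restore the documented power-on register values, reload the
 * MAC address registers from the QEMU NIC configuration, and reset the
 * descriptor pointers and the PHY.
 */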
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR]   = 0xf0000000;
    s->regs[ENET_MIBC]  = 0xc0000000;
    s->regs[ENET_RCR]   = 0x05ee0001;
    s->regs[ENET_OPD]   = 0x00010000;

    s->regs[ENET_PALR]  = (s->conf.macaddr.a[0] << 24)
                          | (s->conf.macaddr.a[1] << 16)
                          | (s->conf.macaddr.a[2] << 8)
                          | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR]  = (s->conf.macaddr.a[4] << 24)
                          | (s->conf.macaddr.a[5] << 16)
                          | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}

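/*
 * Register reads: registers shared by both models are handled directly in
 * imx_eth_read(), model-specific ones are dispatched to the FEC or ENET
 * helper, and unknown offsets are reported as guest errors.
 */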
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    /* ... cases for the ENET-only registers ... */
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    /* ... cases for the registers common to FEC and ENET ... */
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

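/* Reception is possible only while RDAR reports an available RX descriptor. */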
static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    return !!s->regs[ENET_RDAR];
}

static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_fec_receive_last(bd.flags);

            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len  -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;

            trace_imx_enet_receive_last(bd.flags);

            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read                  = imx_eth_read,
    .write                 = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness            = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type                = NET_CLIENT_DRIVER_NIC,
    .size                = sizeof(NICState),
    .can_receive         = imx_eth_can_receive,
    .receive             = imx_eth_receive,
    .cleanup             = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};

static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd    = &vmstate_imx_eth;
    dc->reset   = imx_eth_reset;
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc    = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name          = TYPE_IMX_FEC,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init    = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name          = TYPE_IMX_ENET,
    .parent        = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)