/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "net/net.h"
#include "hw/m68k/mcf.h"
#include "hw/m68k/mcf_fec.h"
#include "hw/net/mii.h"
#include "hw/sysbus.h"
/* For crc32 */
#include <zlib.h>
#include "exec/address-spaces.h"

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
#define FEC_MIB_SIZE 64

typedef struct {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq[FEC_NUM_IRQ];
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
    uint32_t mib[FEC_MIB_SIZE];
} mcf_fec_state;

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

/* ECR bits: ETHER_EN enables the controller, RESET triggers a soft reset.  */
#define FEC_EN       2
#define FEC_RESET    1

/* Map interrupt flags onto IRQ lines.  */
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF, FEC_INT_TXB, FEC_INT_UN,  FEC_INT_RL,
    FEC_INT_RXF, FEC_INT_RXB, FEC_INT_MII, FEC_INT_LC,
    FEC_INT_HB,  FEC_INT_GRA, FEC_INT_EB,  FEC_INT_BABT,
    FEC_INT_BABR
};

/* Buffer Descriptor.  */
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R    0x8000
#define FEC_BD_E    0x8000
#define FEC_BD_O1   0x4000
#define FEC_BD_W    0x2000
#define FEC_BD_O2   0x1000
#define FEC_BD_L    0x0800
#define FEC_BD_TC   0x0400
#define FEC_BD_ABC  0x0200
#define FEC_BD_M    0x0100
#define FEC_BD_BC   0x0080
#define FEC_BD_MC   0x0040
#define FEC_BD_LG   0x0020
#define FEC_BD_NO   0x0010
#define FEC_BD_CR   0x0004
#define FEC_BD_OV   0x0002
#define FEC_BD_TR   0x0001

#define MIB_RMON_T_DROP        0
#define MIB_RMON_T_PACKETS     1
#define MIB_RMON_T_BC_PKT      2
#define MIB_RMON_T_MC_PKT      3
#define MIB_RMON_T_CRC_ALIGN   4
#define MIB_RMON_T_UNDERSIZE   5
#define MIB_RMON_T_OVERSIZE    6
#define MIB_RMON_T_FRAG        7
#define MIB_RMON_T_JAB         8
#define MIB_RMON_T_COL         9
#define MIB_RMON_T_P64         10
#define MIB_RMON_T_P65TO127    11
#define MIB_RMON_T_P128TO255   12
#define MIB_RMON_T_P256TO511   13
#define MIB_RMON_T_P512TO1023  14
#define MIB_RMON_T_P1024TO2047 15
#define MIB_RMON_T_P_GTE2048   16
#define MIB_RMON_T_OCTETS      17
#define MIB_IEEE_T_DROP        18
#define MIB_IEEE_T_FRAME_OK    19
#define MIB_IEEE_T_1COL        20
#define MIB_IEEE_T_MCOL        21
#define MIB_IEEE_T_DEF         22
#define MIB_IEEE_T_LCOL        23
#define MIB_IEEE_T_EXCOL       24
#define MIB_IEEE_T_MACERR      25
#define MIB_IEEE_T_CSERR       26
#define MIB_IEEE_T_SQE         27
#define MIB_IEEE_T_FDXFC       28
#define MIB_IEEE_T_OCTETS_OK   29

#define MIB_RMON_R_DROP        32
#define MIB_RMON_R_PACKETS     33
#define MIB_RMON_R_BC_PKT      34
#define MIB_RMON_R_MC_PKT      35
#define MIB_RMON_R_CRC_ALIGN   36
#define MIB_RMON_R_UNDERSIZE   37
#define MIB_RMON_R_OVERSIZE    38
#define MIB_RMON_R_FRAG        39
#define MIB_RMON_R_JAB         40
#define MIB_RMON_R_RESVD_0     41
#define MIB_RMON_R_P64         42
#define MIB_RMON_R_P65TO127    43
#define MIB_RMON_R_P128TO255   44
#define MIB_RMON_R_P256TO511   45
#define MIB_RMON_R_P512TO1023  46
#define MIB_RMON_R_P1024TO2047 47
#define MIB_RMON_R_P_GTE2048   48
#define MIB_RMON_R_OCTETS      49
#define MIB_IEEE_R_DROP        50
#define MIB_IEEE_R_FRAME_OK    51
#define MIB_IEEE_R_CRC         52
#define MIB_IEEE_R_ALIGN       53
#define MIB_IEEE_R_MACERR      54
#define MIB_IEEE_R_FDXFC       55
#define MIB_IEEE_R_OCTETS_OK   56
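
/*
 * Buffer descriptors live in guest memory and are stored big-endian;
 * these helpers convert them to/from host byte order around DMA accesses.
 */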
static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;

    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}
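
/*
 * Recompute the interrupt lines: an IRQ is asserted when its event bit is
 * pending in EIR and enabled in EIMR.  Only lines whose state changed since
 * the last update are toggled.
 */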
static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}

static void mcf_fec_tx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_T_PACKETS]++;
    s->mib[MIB_RMON_T_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_T_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_T_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_T_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_T_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_T_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_T_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_T_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_T_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_T_FRAME_OK]++;
    s->mib[MIB_IEEE_T_OCTETS_OK] += size;
}
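
/*
 * Transmit path: walk the TX descriptor ring, concatenating the buffers of
 * ready descriptors into a single frame.  A descriptor with the L (last)
 * flag set completes the frame, which is handed to the network layer.  The
 * descriptor walk is capped at FEC_MAX_DESC to avoid looping forever on a
 * malformed ring.
 */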
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len, descnt = 0;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (descnt++ < FEC_MAX_DESC) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit.  */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame.  */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            mcf_fec_tx_stats(s, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor.  */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}
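
/*
 * Re-evaluate whether the receiver can accept packets by peeking at the
 * current RX descriptor; if it is empty (FEC_BD_E), unblock any packets
 * the net layer has queued for us.
 */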
static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}

static void mcf_fec_reset(DeviceState *dev)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}

#define MMFR_WRITE_OP   (1 << 28)
#define MMFR_READ_OP    (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v)  (((v) >> 18) & 0x1f)
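
/*
 * Minimal MDIO read emulation: a single PHY is modelled at address 1,
 * permanently reporting link up, autonegotiation complete and 100 Mbit
 * full-duplex.  Reads of any other PHY address return all-ones.
 */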
static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP)
        return s->mmfr;
    if (MMFR_PHYADDR(s->mmfr) != 1)
        return s->mmfr |= 0xffff;

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}
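
/*
 * Register reads.  Offsets follow the ColdFire FEC memory map; registers
 * that are not modelled read back as constants or zero.
 */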
static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
              | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    case 0x200 ... 0x2e0: return s->mib[(addr & 0x1ff) / 4];
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}

static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024: /* ECR */
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(opaque);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040:
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB.  */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately.  */
        s->tcr = value;
        if (value & 1) {
            s->eir |= FEC_INT_GRA;
        }
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec: /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering.  */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored.  */
        break;
    case 0x150: /* RFSR */
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180: /* ERDSR */
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184: /* ETDSR */
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188: /* EMRBR */
        s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0;
        break;
    case 0x200 ... 0x2e0:
        s->mib[(addr & 0x1ff) / 4] = value;
        break;
    default:
        hw_error("mcf_fec_write Bad address 0x%x\n", (int)addr);
    }
    mcf_fec_update(s);
}

static void mcf_fec_rx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_R_PACKETS]++;
    s->mib[MIB_RMON_R_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_R_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_R_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_R_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_R_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_R_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_R_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_R_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_R_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_R_FRAME_OK]++;
    s->mib[MIB_IEEE_R_OCTETS_OK] += size;
}
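
/*
 * Check whether the empty descriptors at the head of the RX ring provide
 * at least 'want' bytes of buffer space (each descriptor holds at most
 * EMRBR bytes).  Returns 1 if the frame fits, 0 otherwise.
 */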
static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    mcf_fec_bd bd;
    uint32_t addr;

    /* Walk descriptor list to determine if we have enough buffer */
    addr = s->rx_descriptor;
    while (want > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            return 0;
        }
        if (want < s->emrbr) {
            return 1;
        }
        want -= s->emrbr;
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    return 0;
}
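
/*
 * Receive path: append the 4-byte FCS, then scatter the frame across the
 * empty RX descriptors, raising RXB per buffer and RXF once the whole
 * frame (including CRC) has been written to guest memory.
 */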
static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint8_t *crc_ptr;
    uint32_t buf_addr;
    unsigned int buf_len;
    size_t retsize;

    DPRINTF("do_rx len %d\n", size);
    if (!s->rx_enabled) {
        return -1;
    }
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* 4 bytes for the CRC.  */
    size += 4;
    /* Huge frames are truncated.  */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    /* Check if we have enough space in current descriptors */
    if (!mcf_fec_have_receive_space(s, size)) {
        return 0;
    }
    addr = s->rx_descriptor;
    retsize = size;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_rx_stats(s, retsize);
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return retsize;
}

static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};

static void mcf_fec_realize(DeviceState *dev, Error **errp)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void mcf_fec_instance_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    mcf_fec_state *s = MCF_FEC_NET(obj);
    int i;

    memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400);
    sysbus_init_mmio(sbd, &s->iomem);
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }
}

static Property mcf_fec_properties[] = {
    DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void mcf_fec_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->realize = mcf_fec_realize;
    dc->desc = "MCF Fast Ethernet Controller network device";
    dc->reset = mcf_fec_reset;
    dc->props = mcf_fec_properties;
}

static const TypeInfo mcf_fec_info = {
    .name = TYPE_MCF_FEC_NET,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(mcf_fec_state),
    .instance_init = mcf_fec_instance_init,
    .class_init = mcf_fec_class_init,
};

static void mcf_fec_register_types(void)
{
    type_register_static(&mcf_fec_info);
}

type_init(mcf_fec_register_types)