/*
 * QEMU rocker switch emulation - PCI device
 *
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
18 #include "qemu/osdep.h"
20 #include "hw/pci/pci.h"
21 #include "hw/pci/msix.h"
25 #include "qemu/bitops.h"
26 #include "qmp-commands.h"
29 #include "rocker_hw.h"
30 #include "rocker_fp.h"
31 #include "rocker_desc.h"
32 #include "rocker_tlv.h"
33 #include "rocker_world.h"
34 #include "rocker_of_dpa.h"
42 MemoryRegion msix_bar
;
44 /* switch configuration */
45 char *name
; /* switch name */
46 uint32_t fp_ports
; /* front-panel port count */
47 NICPeers
*fp_ports_peers
;
48 MACAddr fp_start_macaddr
; /* front-panel port 0 mac addr */
49 uint64_t switch_id
; /* switch id */
51 /* front-panel ports */
52 FpPort
*fp_port
[ROCKER_FP_PORTS_MAX
];
54 /* register backings */
57 dma_addr_t test_dma_addr
;
58 uint32_t test_dma_size
;
59 uint64_t lower32
; /* lower 32-bit val in 2-part 64-bit access */
65 World
*worlds
[ROCKER_WORLD_TYPE_MAX
];
68 QLIST_ENTRY(rocker
) next
;
71 #define ROCKER "rocker"
73 #define to_rocker(obj) \
74 OBJECT_CHECK(Rocker, (obj), ROCKER)
76 static QLIST_HEAD(, rocker
) rockers
;
78 Rocker
*rocker_find(const char *name
)
82 QLIST_FOREACH(r
, &rockers
, next
)
83 if (strcmp(r
->name
, name
) == 0) {
90 World
*rocker_get_world(Rocker
*r
, enum rocker_world_type type
)
92 if (type
< ROCKER_WORLD_TYPE_MAX
) {
93 return r
->worlds
[type
];
98 RockerSwitch
*qmp_query_rocker(const char *name
, Error
**errp
)
100 RockerSwitch
*rocker
;
103 r
= rocker_find(name
);
105 error_setg(errp
, "rocker %s not found", name
);
109 rocker
= g_new0(RockerSwitch
, 1);
110 rocker
->name
= g_strdup(r
->name
);
111 rocker
->id
= r
->switch_id
;
112 rocker
->ports
= r
->fp_ports
;
117 RockerPortList
*qmp_query_rocker_ports(const char *name
, Error
**errp
)
119 RockerPortList
*list
= NULL
;
123 r
= rocker_find(name
);
125 error_setg(errp
, "rocker %s not found", name
);
129 for (i
= r
->fp_ports
- 1; i
>= 0; i
--) {
130 RockerPortList
*info
= g_malloc0(sizeof(*info
));
131 info
->value
= g_malloc0(sizeof(*info
->value
));
132 struct fp_port
*port
= r
->fp_port
[i
];
134 fp_port_get_info(port
, info
);
142 uint32_t rocker_fp_ports(Rocker
*r
)
147 static uint32_t rocker_get_pport_by_tx_ring(Rocker
*r
,
150 return (desc_ring_index(ring
) - 2) / 2 + 1;
153 static int tx_consume(Rocker
*r
, DescInfo
*info
)
155 PCIDevice
*dev
= PCI_DEVICE(r
);
156 char *buf
= desc_get_buf(info
, true);
158 RockerTlv
*tlvs
[ROCKER_TLV_TX_MAX
+ 1];
159 struct iovec iov
[ROCKER_TX_FRAGS_MAX
] = { { 0, }, };
162 uint16_t tx_offload
= ROCKER_TX_OFFLOAD_NONE
;
163 uint16_t tx_l3_csum_off
= 0;
164 uint16_t tx_tso_mss
= 0;
165 uint16_t tx_tso_hdr_len
= 0;
172 return -ROCKER_ENXIO
;
175 rocker_tlv_parse(tlvs
, ROCKER_TLV_TX_MAX
, buf
, desc_tlv_size(info
));
177 if (!tlvs
[ROCKER_TLV_TX_FRAGS
]) {
178 return -ROCKER_EINVAL
;
181 pport
= rocker_get_pport_by_tx_ring(r
, desc_get_ring(info
));
182 if (!fp_port_from_pport(pport
, &port
)) {
183 return -ROCKER_EINVAL
;
186 if (tlvs
[ROCKER_TLV_TX_OFFLOAD
]) {
187 tx_offload
= rocker_tlv_get_u8(tlvs
[ROCKER_TLV_TX_OFFLOAD
]);
190 switch (tx_offload
) {
191 case ROCKER_TX_OFFLOAD_L3_CSUM
:
192 if (!tlvs
[ROCKER_TLV_TX_L3_CSUM_OFF
]) {
193 return -ROCKER_EINVAL
;
196 case ROCKER_TX_OFFLOAD_TSO
:
197 if (!tlvs
[ROCKER_TLV_TX_TSO_MSS
] ||
198 !tlvs
[ROCKER_TLV_TX_TSO_HDR_LEN
]) {
199 return -ROCKER_EINVAL
;
204 if (tlvs
[ROCKER_TLV_TX_L3_CSUM_OFF
]) {
205 tx_l3_csum_off
= rocker_tlv_get_le16(tlvs
[ROCKER_TLV_TX_L3_CSUM_OFF
]);
208 if (tlvs
[ROCKER_TLV_TX_TSO_MSS
]) {
209 tx_tso_mss
= rocker_tlv_get_le16(tlvs
[ROCKER_TLV_TX_TSO_MSS
]);
212 if (tlvs
[ROCKER_TLV_TX_TSO_HDR_LEN
]) {
213 tx_tso_hdr_len
= rocker_tlv_get_le16(tlvs
[ROCKER_TLV_TX_TSO_HDR_LEN
]);
216 rocker_tlv_for_each_nested(tlv_frag
, tlvs
[ROCKER_TLV_TX_FRAGS
], rem
) {
220 if (rocker_tlv_type(tlv_frag
) != ROCKER_TLV_TX_FRAG
) {
221 err
= -ROCKER_EINVAL
;
225 rocker_tlv_parse_nested(tlvs
, ROCKER_TLV_TX_FRAG_ATTR_MAX
, tlv_frag
);
227 if (!tlvs
[ROCKER_TLV_TX_FRAG_ATTR_ADDR
] ||
228 !tlvs
[ROCKER_TLV_TX_FRAG_ATTR_LEN
]) {
229 err
= -ROCKER_EINVAL
;
233 frag_addr
= rocker_tlv_get_le64(tlvs
[ROCKER_TLV_TX_FRAG_ATTR_ADDR
]);
234 frag_len
= rocker_tlv_get_le16(tlvs
[ROCKER_TLV_TX_FRAG_ATTR_LEN
]);
236 if (iovcnt
>= ROCKER_TX_FRAGS_MAX
) {
237 goto err_too_many_frags
;
239 iov
[iovcnt
].iov_len
= frag_len
;
240 iov
[iovcnt
].iov_base
= g_malloc(frag_len
);
241 if (!iov
[iovcnt
].iov_base
) {
242 err
= -ROCKER_ENOMEM
;
246 if (pci_dma_read(dev
, frag_addr
, iov
[iovcnt
].iov_base
,
247 iov
[iovcnt
].iov_len
)) {
255 /* XXX perform Tx offloads */
256 /* XXX silence compiler for now */
257 tx_l3_csum_off
+= tx_tso_mss
= tx_tso_hdr_len
= 0;
260 err
= fp_port_eg(r
->fp_port
[port
], iov
, iovcnt
);
266 for (i
= 0; i
< ROCKER_TX_FRAGS_MAX
; i
++) {
267 g_free(iov
[i
].iov_base
);
273 static int cmd_get_port_settings(Rocker
*r
,
274 DescInfo
*info
, char *buf
,
275 RockerTlv
*cmd_info_tlv
)
277 RockerTlv
*tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
288 enum rocker_world_type mode
;
293 rocker_tlv_parse_nested(tlvs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
296 if (!tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
]) {
297 return -ROCKER_EINVAL
;
300 pport
= rocker_tlv_get_le32(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
]);
301 if (!fp_port_from_pport(pport
, &port
)) {
302 return -ROCKER_EINVAL
;
304 fp_port
= r
->fp_port
[port
];
306 err
= fp_port_get_settings(fp_port
, &speed
, &duplex
, &autoneg
);
311 fp_port_get_macaddr(fp_port
, &macaddr
);
312 mode
= world_type(fp_port_get_world(fp_port
));
313 learning
= fp_port_get_learning(fp_port
);
314 phys_name
= fp_port_get_name(fp_port
);
316 tlv_size
= rocker_tlv_total_size(0) + /* nest */
317 rocker_tlv_total_size(sizeof(uint32_t)) + /* pport */
318 rocker_tlv_total_size(sizeof(uint32_t)) + /* speed */
319 rocker_tlv_total_size(sizeof(uint8_t)) + /* duplex */
320 rocker_tlv_total_size(sizeof(uint8_t)) + /* autoneg */
321 rocker_tlv_total_size(sizeof(macaddr
.a
)) + /* macaddr */
322 rocker_tlv_total_size(sizeof(uint8_t)) + /* mode */
323 rocker_tlv_total_size(sizeof(uint8_t)) + /* learning */
324 rocker_tlv_total_size(strlen(phys_name
));
326 if (tlv_size
> desc_buf_size(info
)) {
327 return -ROCKER_EMSGSIZE
;
331 nest
= rocker_tlv_nest_start(buf
, &pos
, ROCKER_TLV_CMD_INFO
);
332 rocker_tlv_put_le32(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
, pport
);
333 rocker_tlv_put_le32(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
, speed
);
334 rocker_tlv_put_u8(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
, duplex
);
335 rocker_tlv_put_u8(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
, autoneg
);
336 rocker_tlv_put(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
,
337 sizeof(macaddr
.a
), macaddr
.a
);
338 rocker_tlv_put_u8(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_MODE
, mode
);
339 rocker_tlv_put_u8(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING
,
341 rocker_tlv_put(buf
, &pos
, ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME
,
342 strlen(phys_name
), phys_name
);
343 rocker_tlv_nest_end(buf
, &pos
, nest
);
345 return desc_set_buf(info
, tlv_size
);
348 static int cmd_set_port_settings(Rocker
*r
,
349 RockerTlv
*cmd_info_tlv
)
351 RockerTlv
*tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MAX
+ 1];
360 enum rocker_world_type mode
;
363 rocker_tlv_parse_nested(tlvs
, ROCKER_TLV_CMD_PORT_SETTINGS_MAX
,
366 if (!tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
]) {
367 return -ROCKER_EINVAL
;
370 pport
= rocker_tlv_get_le32(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT
]);
371 if (!fp_port_from_pport(pport
, &port
)) {
372 return -ROCKER_EINVAL
;
374 fp_port
= r
->fp_port
[port
];
376 if (tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
] &&
377 tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
] &&
378 tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
]) {
380 speed
= rocker_tlv_get_le32(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED
]);
381 duplex
= rocker_tlv_get_u8(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX
]);
382 autoneg
= rocker_tlv_get_u8(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG
]);
384 err
= fp_port_set_settings(fp_port
, speed
, duplex
, autoneg
);
390 if (tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
]) {
391 if (rocker_tlv_len(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
]) !=
393 return -ROCKER_EINVAL
;
396 rocker_tlv_data(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR
]),
398 fp_port_set_macaddr(fp_port
, &macaddr
);
401 if (tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MODE
]) {
402 mode
= rocker_tlv_get_u8(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_MODE
]);
403 if (mode
>= ROCKER_WORLD_TYPE_MAX
) {
404 return -ROCKER_EINVAL
;
406 /* We don't support world change. */
407 if (!fp_port_check_world(fp_port
, r
->worlds
[mode
])) {
408 return -ROCKER_EINVAL
;
412 if (tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING
]) {
414 rocker_tlv_get_u8(tlvs
[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING
]);
415 fp_port_set_learning(fp_port
, learning
);
421 static int cmd_consume(Rocker
*r
, DescInfo
*info
)
423 char *buf
= desc_get_buf(info
, false);
424 RockerTlv
*tlvs
[ROCKER_TLV_CMD_MAX
+ 1];
431 return -ROCKER_ENXIO
;
434 rocker_tlv_parse(tlvs
, ROCKER_TLV_CMD_MAX
, buf
, desc_tlv_size(info
));
436 if (!tlvs
[ROCKER_TLV_CMD_TYPE
] || !tlvs
[ROCKER_TLV_CMD_INFO
]) {
437 return -ROCKER_EINVAL
;
440 cmd
= rocker_tlv_get_le16(tlvs
[ROCKER_TLV_CMD_TYPE
]);
441 info_tlv
= tlvs
[ROCKER_TLV_CMD_INFO
];
443 /* This might be reworked to something like this:
444 * Every world will have an array of command handlers from
445 * ROCKER_TLV_CMD_TYPE_UNSPEC to ROCKER_TLV_CMD_TYPE_MAX. It is
446 * up to each world to implement whatever commands it wants.
447 * It can reference "generic" commands as cmd_set_port_settings or
448 * cmd_get_port_settings
452 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD
:
453 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD
:
454 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL
:
455 case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS
:
456 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD
:
457 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD
:
458 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL
:
459 case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS
:
460 world
= r
->worlds
[ROCKER_WORLD_TYPE_OF_DPA
];
461 err
= world_do_cmd(world
, info
, buf
, cmd
, info_tlv
);
463 case ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS
:
464 err
= cmd_get_port_settings(r
, info
, buf
, info_tlv
);
466 case ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
:
467 err
= cmd_set_port_settings(r
, info_tlv
);
470 err
= -ROCKER_EINVAL
;
477 static void rocker_msix_irq(Rocker
*r
, unsigned vector
)
479 PCIDevice
*dev
= PCI_DEVICE(r
);
481 DPRINTF("MSI-X notify request for vector %d\n", vector
);
482 if (vector
>= ROCKER_MSIX_VEC_COUNT(r
->fp_ports
)) {
483 DPRINTF("incorrect vector %d\n", vector
);
486 msix_notify(dev
, vector
);
489 int rocker_event_link_changed(Rocker
*r
, uint32_t pport
, bool link_up
)
491 DescRing
*ring
= r
->rings
[ROCKER_RING_EVENT
];
492 DescInfo
*info
= desc_ring_fetch_desc(ring
);
500 return -ROCKER_ENOBUFS
;
503 tlv_size
= rocker_tlv_total_size(sizeof(uint16_t)) + /* event type */
504 rocker_tlv_total_size(0) + /* nest */
505 rocker_tlv_total_size(sizeof(uint32_t)) + /* pport */
506 rocker_tlv_total_size(sizeof(uint8_t)); /* link up */
508 if (tlv_size
> desc_buf_size(info
)) {
509 err
= -ROCKER_EMSGSIZE
;
513 buf
= desc_get_buf(info
, false);
515 err
= -ROCKER_ENOMEM
;
520 rocker_tlv_put_le32(buf
, &pos
, ROCKER_TLV_EVENT_TYPE
,
521 ROCKER_TLV_EVENT_TYPE_LINK_CHANGED
);
522 nest
= rocker_tlv_nest_start(buf
, &pos
, ROCKER_TLV_EVENT_INFO
);
523 rocker_tlv_put_le32(buf
, &pos
, ROCKER_TLV_EVENT_LINK_CHANGED_PPORT
, pport
);
524 rocker_tlv_put_u8(buf
, &pos
, ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP
,
526 rocker_tlv_nest_end(buf
, &pos
, nest
);
528 err
= desc_set_buf(info
, tlv_size
);
532 if (desc_ring_post_desc(ring
, err
)) {
533 rocker_msix_irq(r
, ROCKER_MSIX_VEC_EVENT
);
539 int rocker_event_mac_vlan_seen(Rocker
*r
, uint32_t pport
, uint8_t *addr
,
542 DescRing
*ring
= r
->rings
[ROCKER_RING_EVENT
];
552 if (!fp_port_from_pport(pport
, &port
)) {
553 return -ROCKER_EINVAL
;
555 fp_port
= r
->fp_port
[port
];
556 if (!fp_port_get_learning(fp_port
)) {
560 info
= desc_ring_fetch_desc(ring
);
562 return -ROCKER_ENOBUFS
;
565 tlv_size
= rocker_tlv_total_size(sizeof(uint16_t)) + /* event type */
566 rocker_tlv_total_size(0) + /* nest */
567 rocker_tlv_total_size(sizeof(uint32_t)) + /* pport */
568 rocker_tlv_total_size(ETH_ALEN
) + /* mac addr */
569 rocker_tlv_total_size(sizeof(uint16_t)); /* vlan_id */
571 if (tlv_size
> desc_buf_size(info
)) {
572 err
= -ROCKER_EMSGSIZE
;
576 buf
= desc_get_buf(info
, false);
578 err
= -ROCKER_ENOMEM
;
583 rocker_tlv_put_le32(buf
, &pos
, ROCKER_TLV_EVENT_TYPE
,
584 ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN
);
585 nest
= rocker_tlv_nest_start(buf
, &pos
, ROCKER_TLV_EVENT_INFO
);
586 rocker_tlv_put_le32(buf
, &pos
, ROCKER_TLV_EVENT_MAC_VLAN_PPORT
, pport
);
587 rocker_tlv_put(buf
, &pos
, ROCKER_TLV_EVENT_MAC_VLAN_MAC
, ETH_ALEN
, addr
);
588 rocker_tlv_put_u16(buf
, &pos
, ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID
, vlan_id
);
589 rocker_tlv_nest_end(buf
, &pos
, nest
);
591 err
= desc_set_buf(info
, tlv_size
);
595 if (desc_ring_post_desc(ring
, err
)) {
596 rocker_msix_irq(r
, ROCKER_MSIX_VEC_EVENT
);
602 static DescRing
*rocker_get_rx_ring_by_pport(Rocker
*r
,
605 return r
->rings
[(pport
- 1) * 2 + 3];
608 int rx_produce(World
*world
, uint32_t pport
,
609 const struct iovec
*iov
, int iovcnt
, uint8_t copy_to_cpu
)
611 Rocker
*r
= world_rocker(world
);
612 PCIDevice
*dev
= (PCIDevice
*)r
;
613 DescRing
*ring
= rocker_get_rx_ring_by_pport(r
, pport
);
614 DescInfo
*info
= desc_ring_fetch_desc(ring
);
616 size_t data_size
= iov_size(iov
, iovcnt
);
618 uint16_t rx_flags
= 0;
619 uint16_t rx_csum
= 0;
621 RockerTlv
*tlvs
[ROCKER_TLV_RX_MAX
+ 1];
623 uint16_t frag_max_len
;
628 return -ROCKER_ENOBUFS
;
631 buf
= desc_get_buf(info
, false);
636 rocker_tlv_parse(tlvs
, ROCKER_TLV_RX_MAX
, buf
, desc_tlv_size(info
));
638 if (!tlvs
[ROCKER_TLV_RX_FRAG_ADDR
] ||
639 !tlvs
[ROCKER_TLV_RX_FRAG_MAX_LEN
]) {
640 err
= -ROCKER_EINVAL
;
644 frag_addr
= rocker_tlv_get_le64(tlvs
[ROCKER_TLV_RX_FRAG_ADDR
]);
645 frag_max_len
= rocker_tlv_get_le16(tlvs
[ROCKER_TLV_RX_FRAG_MAX_LEN
]);
647 if (data_size
> frag_max_len
) {
648 err
= -ROCKER_EMSGSIZE
;
653 rx_flags
|= ROCKER_RX_FLAGS_FWD_OFFLOAD
;
656 /* XXX calc rx flags/csum */
658 tlv_size
= rocker_tlv_total_size(sizeof(uint16_t)) + /* flags */
659 rocker_tlv_total_size(sizeof(uint16_t)) + /* csum */
660 rocker_tlv_total_size(sizeof(uint64_t)) + /* frag addr */
661 rocker_tlv_total_size(sizeof(uint16_t)) + /* frag max len */
662 rocker_tlv_total_size(sizeof(uint16_t)); /* frag len */
664 if (tlv_size
> desc_buf_size(info
)) {
665 err
= -ROCKER_EMSGSIZE
;
670 * iov dma write can be optimized in similar way e1000 does it in
671 * e1000_receive_iov. But maybe it would make sense to introduce
672 * generic helper iov_dma_write.
675 data
= g_malloc(data_size
);
677 err
= -ROCKER_ENOMEM
;
680 iov_to_buf(iov
, iovcnt
, 0, data
, data_size
);
681 pci_dma_write(dev
, frag_addr
, data
, data_size
);
685 rocker_tlv_put_le16(buf
, &pos
, ROCKER_TLV_RX_FLAGS
, rx_flags
);
686 rocker_tlv_put_le16(buf
, &pos
, ROCKER_TLV_RX_CSUM
, rx_csum
);
687 rocker_tlv_put_le64(buf
, &pos
, ROCKER_TLV_RX_FRAG_ADDR
, frag_addr
);
688 rocker_tlv_put_le16(buf
, &pos
, ROCKER_TLV_RX_FRAG_MAX_LEN
, frag_max_len
);
689 rocker_tlv_put_le16(buf
, &pos
, ROCKER_TLV_RX_FRAG_LEN
, data_size
);
691 err
= desc_set_buf(info
, tlv_size
);
694 if (desc_ring_post_desc(ring
, err
)) {
695 rocker_msix_irq(r
, ROCKER_MSIX_VEC_RX(pport
- 1));
701 int rocker_port_eg(Rocker
*r
, uint32_t pport
,
702 const struct iovec
*iov
, int iovcnt
)
707 if (!fp_port_from_pport(pport
, &port
)) {
708 return -ROCKER_EINVAL
;
711 fp_port
= r
->fp_port
[port
];
713 return fp_port_eg(fp_port
, iov
, iovcnt
);
716 static void rocker_test_dma_ctrl(Rocker
*r
, uint32_t val
)
718 PCIDevice
*dev
= PCI_DEVICE(r
);
722 buf
= g_malloc(r
->test_dma_size
);
725 DPRINTF("test dma buffer alloc failed");
730 case ROCKER_TEST_DMA_CTRL_CLEAR
:
731 memset(buf
, 0, r
->test_dma_size
);
733 case ROCKER_TEST_DMA_CTRL_FILL
:
734 memset(buf
, 0x96, r
->test_dma_size
);
736 case ROCKER_TEST_DMA_CTRL_INVERT
:
737 pci_dma_read(dev
, r
->test_dma_addr
, buf
, r
->test_dma_size
);
738 for (i
= 0; i
< r
->test_dma_size
; i
++) {
743 DPRINTF("not test dma control val=0x%08x\n", val
);
746 pci_dma_write(dev
, r
->test_dma_addr
, buf
, r
->test_dma_size
);
748 rocker_msix_irq(r
, ROCKER_MSIX_VEC_TEST
);
754 static void rocker_reset(DeviceState
*dev
);
756 static void rocker_control(Rocker
*r
, uint32_t val
)
758 if (val
& ROCKER_CONTROL_RESET
) {
759 rocker_reset(DEVICE(r
));
763 static int rocker_pci_ring_count(Rocker
*r
)
768 * - tx and rx ring per each port
770 return 2 + (2 * r
->fp_ports
);
773 static bool rocker_addr_is_desc_reg(Rocker
*r
, hwaddr addr
)
775 hwaddr start
= ROCKER_DMA_DESC_BASE
;
776 hwaddr end
= start
+ (ROCKER_DMA_DESC_SIZE
* rocker_pci_ring_count(r
));
778 return addr
>= start
&& addr
< end
;
781 static void rocker_port_phys_enable_write(Rocker
*r
, uint64_t new)
788 for (i
= 0; i
< r
->fp_ports
; i
++) {
789 fp_port
= r
->fp_port
[i
];
790 old_enabled
= fp_port_enabled(fp_port
);
791 new_enabled
= (new >> (i
+ 1)) & 0x1;
792 if (new_enabled
== old_enabled
) {
796 fp_port_enable(r
->fp_port
[i
]);
798 fp_port_disable(r
->fp_port
[i
]);
803 static void rocker_io_writel(void *opaque
, hwaddr addr
, uint32_t val
)
807 if (rocker_addr_is_desc_reg(r
, addr
)) {
808 unsigned index
= ROCKER_RING_INDEX(addr
);
809 unsigned offset
= addr
& ROCKER_DMA_DESC_MASK
;
812 case ROCKER_DMA_DESC_ADDR_OFFSET
:
813 r
->lower32
= (uint64_t)val
;
815 case ROCKER_DMA_DESC_ADDR_OFFSET
+ 4:
816 desc_ring_set_base_addr(r
->rings
[index
],
817 ((uint64_t)val
) << 32 | r
->lower32
);
820 case ROCKER_DMA_DESC_SIZE_OFFSET
:
821 desc_ring_set_size(r
->rings
[index
], val
);
823 case ROCKER_DMA_DESC_HEAD_OFFSET
:
824 if (desc_ring_set_head(r
->rings
[index
], val
)) {
825 rocker_msix_irq(r
, desc_ring_get_msix_vector(r
->rings
[index
]));
828 case ROCKER_DMA_DESC_CTRL_OFFSET
:
829 desc_ring_set_ctrl(r
->rings
[index
], val
);
831 case ROCKER_DMA_DESC_CREDITS_OFFSET
:
832 if (desc_ring_ret_credits(r
->rings
[index
], val
)) {
833 rocker_msix_irq(r
, desc_ring_get_msix_vector(r
->rings
[index
]));
837 DPRINTF("not implemented dma reg write(l) addr=0x" TARGET_FMT_plx
838 " val=0x%08x (ring %d, addr=0x%02x)\n",
839 addr
, val
, index
, offset
);
846 case ROCKER_TEST_REG
:
849 case ROCKER_TEST_REG64
:
850 case ROCKER_TEST_DMA_ADDR
:
851 case ROCKER_PORT_PHYS_ENABLE
:
852 r
->lower32
= (uint64_t)val
;
854 case ROCKER_TEST_REG64
+ 4:
855 r
->test_reg64
= ((uint64_t)val
) << 32 | r
->lower32
;
858 case ROCKER_TEST_IRQ
:
859 rocker_msix_irq(r
, val
);
861 case ROCKER_TEST_DMA_SIZE
:
862 r
->test_dma_size
= val
;
864 case ROCKER_TEST_DMA_ADDR
+ 4:
865 r
->test_dma_addr
= ((uint64_t)val
) << 32 | r
->lower32
;
868 case ROCKER_TEST_DMA_CTRL
:
869 rocker_test_dma_ctrl(r
, val
);
872 rocker_control(r
, val
);
874 case ROCKER_PORT_PHYS_ENABLE
+ 4:
875 rocker_port_phys_enable_write(r
, ((uint64_t)val
) << 32 | r
->lower32
);
879 DPRINTF("not implemented write(l) addr=0x" TARGET_FMT_plx
880 " val=0x%08x\n", addr
, val
);
885 static void rocker_io_writeq(void *opaque
, hwaddr addr
, uint64_t val
)
889 if (rocker_addr_is_desc_reg(r
, addr
)) {
890 unsigned index
= ROCKER_RING_INDEX(addr
);
891 unsigned offset
= addr
& ROCKER_DMA_DESC_MASK
;
894 case ROCKER_DMA_DESC_ADDR_OFFSET
:
895 desc_ring_set_base_addr(r
->rings
[index
], val
);
898 DPRINTF("not implemented dma reg write(q) addr=0x" TARGET_FMT_plx
899 " val=0x" TARGET_FMT_plx
" (ring %d, offset=0x%02x)\n",
900 addr
, val
, index
, offset
);
907 case ROCKER_TEST_REG64
:
910 case ROCKER_TEST_DMA_ADDR
:
911 r
->test_dma_addr
= val
;
913 case ROCKER_PORT_PHYS_ENABLE
:
914 rocker_port_phys_enable_write(r
, val
);
917 DPRINTF("not implemented write(q) addr=0x" TARGET_FMT_plx
918 " val=0x" TARGET_FMT_plx
"\n", addr
, val
);
924 #define regname(reg) case (reg): return #reg
925 static const char *rocker_reg_name(void *opaque
, hwaddr addr
)
929 if (rocker_addr_is_desc_reg(r
, addr
)) {
930 unsigned index
= ROCKER_RING_INDEX(addr
);
931 unsigned offset
= addr
& ROCKER_DMA_DESC_MASK
;
932 static char buf
[100];
937 sprintf(ring_name
, "cmd");
940 sprintf(ring_name
, "event");
943 sprintf(ring_name
, "%s-%d", index
% 2 ? "rx" : "tx",
948 case ROCKER_DMA_DESC_ADDR_OFFSET
:
949 sprintf(buf
, "Ring[%s] ADDR", ring_name
);
951 case ROCKER_DMA_DESC_ADDR_OFFSET
+4:
952 sprintf(buf
, "Ring[%s] ADDR+4", ring_name
);
954 case ROCKER_DMA_DESC_SIZE_OFFSET
:
955 sprintf(buf
, "Ring[%s] SIZE", ring_name
);
957 case ROCKER_DMA_DESC_HEAD_OFFSET
:
958 sprintf(buf
, "Ring[%s] HEAD", ring_name
);
960 case ROCKER_DMA_DESC_TAIL_OFFSET
:
961 sprintf(buf
, "Ring[%s] TAIL", ring_name
);
963 case ROCKER_DMA_DESC_CTRL_OFFSET
:
964 sprintf(buf
, "Ring[%s] CTRL", ring_name
);
966 case ROCKER_DMA_DESC_CREDITS_OFFSET
:
967 sprintf(buf
, "Ring[%s] CREDITS", ring_name
);
970 sprintf(buf
, "Ring[%s] ???", ring_name
);
975 regname(ROCKER_BOGUS_REG0
);
976 regname(ROCKER_BOGUS_REG1
);
977 regname(ROCKER_BOGUS_REG2
);
978 regname(ROCKER_BOGUS_REG3
);
979 regname(ROCKER_TEST_REG
);
980 regname(ROCKER_TEST_REG64
);
981 regname(ROCKER_TEST_REG64
+4);
982 regname(ROCKER_TEST_IRQ
);
983 regname(ROCKER_TEST_DMA_ADDR
);
984 regname(ROCKER_TEST_DMA_ADDR
+4);
985 regname(ROCKER_TEST_DMA_SIZE
);
986 regname(ROCKER_TEST_DMA_CTRL
);
987 regname(ROCKER_CONTROL
);
988 regname(ROCKER_PORT_PHYS_COUNT
);
989 regname(ROCKER_PORT_PHYS_LINK_STATUS
);
990 regname(ROCKER_PORT_PHYS_LINK_STATUS
+4);
991 regname(ROCKER_PORT_PHYS_ENABLE
);
992 regname(ROCKER_PORT_PHYS_ENABLE
+4);
993 regname(ROCKER_SWITCH_ID
);
994 regname(ROCKER_SWITCH_ID
+4);
1000 static const char *rocker_reg_name(void *opaque
, hwaddr addr
)
1006 static void rocker_mmio_write(void *opaque
, hwaddr addr
, uint64_t val
,
1009 DPRINTF("Write %s addr " TARGET_FMT_plx
1010 ", size %u, val " TARGET_FMT_plx
"\n",
1011 rocker_reg_name(opaque
, addr
), addr
, size
, val
);
1015 rocker_io_writel(opaque
, addr
, val
);
1018 rocker_io_writeq(opaque
, addr
, val
);
1023 static uint64_t rocker_port_phys_link_status(Rocker
*r
)
1026 uint64_t status
= 0;
1028 for (i
= 0; i
< r
->fp_ports
; i
++) {
1029 FpPort
*port
= r
->fp_port
[i
];
1031 if (fp_port_get_link_up(port
)) {
1032 status
|= 1 << (i
+ 1);
1038 static uint64_t rocker_port_phys_enable_read(Rocker
*r
)
1043 for (i
= 0; i
< r
->fp_ports
; i
++) {
1044 FpPort
*port
= r
->fp_port
[i
];
1046 if (fp_port_enabled(port
)) {
1047 ret
|= 1 << (i
+ 1);
1053 static uint32_t rocker_io_readl(void *opaque
, hwaddr addr
)
1058 if (rocker_addr_is_desc_reg(r
, addr
)) {
1059 unsigned index
= ROCKER_RING_INDEX(addr
);
1060 unsigned offset
= addr
& ROCKER_DMA_DESC_MASK
;
1063 case ROCKER_DMA_DESC_ADDR_OFFSET
:
1064 ret
= (uint32_t)desc_ring_get_base_addr(r
->rings
[index
]);
1066 case ROCKER_DMA_DESC_ADDR_OFFSET
+ 4:
1067 ret
= (uint32_t)(desc_ring_get_base_addr(r
->rings
[index
]) >> 32);
1069 case ROCKER_DMA_DESC_SIZE_OFFSET
:
1070 ret
= desc_ring_get_size(r
->rings
[index
]);
1072 case ROCKER_DMA_DESC_HEAD_OFFSET
:
1073 ret
= desc_ring_get_head(r
->rings
[index
]);
1075 case ROCKER_DMA_DESC_TAIL_OFFSET
:
1076 ret
= desc_ring_get_tail(r
->rings
[index
]);
1078 case ROCKER_DMA_DESC_CREDITS_OFFSET
:
1079 ret
= desc_ring_get_credits(r
->rings
[index
]);
1082 DPRINTF("not implemented dma reg read(l) addr=0x" TARGET_FMT_plx
1083 " (ring %d, addr=0x%02x)\n", addr
, index
, offset
);
1091 case ROCKER_BOGUS_REG0
:
1092 case ROCKER_BOGUS_REG1
:
1093 case ROCKER_BOGUS_REG2
:
1094 case ROCKER_BOGUS_REG3
:
1097 case ROCKER_TEST_REG
:
1098 ret
= r
->test_reg
* 2;
1100 case ROCKER_TEST_REG64
:
1101 ret
= (uint32_t)(r
->test_reg64
* 2);
1103 case ROCKER_TEST_REG64
+ 4:
1104 ret
= (uint32_t)((r
->test_reg64
* 2) >> 32);
1106 case ROCKER_TEST_DMA_SIZE
:
1107 ret
= r
->test_dma_size
;
1109 case ROCKER_TEST_DMA_ADDR
:
1110 ret
= (uint32_t)r
->test_dma_addr
;
1112 case ROCKER_TEST_DMA_ADDR
+ 4:
1113 ret
= (uint32_t)(r
->test_dma_addr
>> 32);
1115 case ROCKER_PORT_PHYS_COUNT
:
1118 case ROCKER_PORT_PHYS_LINK_STATUS
:
1119 ret
= (uint32_t)rocker_port_phys_link_status(r
);
1121 case ROCKER_PORT_PHYS_LINK_STATUS
+ 4:
1122 ret
= (uint32_t)(rocker_port_phys_link_status(r
) >> 32);
1124 case ROCKER_PORT_PHYS_ENABLE
:
1125 ret
= (uint32_t)rocker_port_phys_enable_read(r
);
1127 case ROCKER_PORT_PHYS_ENABLE
+ 4:
1128 ret
= (uint32_t)(rocker_port_phys_enable_read(r
) >> 32);
1130 case ROCKER_SWITCH_ID
:
1131 ret
= (uint32_t)r
->switch_id
;
1133 case ROCKER_SWITCH_ID
+ 4:
1134 ret
= (uint32_t)(r
->switch_id
>> 32);
1137 DPRINTF("not implemented read(l) addr=0x" TARGET_FMT_plx
"\n", addr
);
1144 static uint64_t rocker_io_readq(void *opaque
, hwaddr addr
)
1149 if (rocker_addr_is_desc_reg(r
, addr
)) {
1150 unsigned index
= ROCKER_RING_INDEX(addr
);
1151 unsigned offset
= addr
& ROCKER_DMA_DESC_MASK
;
1153 switch (addr
& ROCKER_DMA_DESC_MASK
) {
1154 case ROCKER_DMA_DESC_ADDR_OFFSET
:
1155 ret
= desc_ring_get_base_addr(r
->rings
[index
]);
1158 DPRINTF("not implemented dma reg read(q) addr=0x" TARGET_FMT_plx
1159 " (ring %d, addr=0x%02x)\n", addr
, index
, offset
);
1167 case ROCKER_BOGUS_REG0
:
1168 case ROCKER_BOGUS_REG2
:
1169 ret
= 0xDEADBABEDEADBABEULL
;
1171 case ROCKER_TEST_REG64
:
1172 ret
= r
->test_reg64
* 2;
1174 case ROCKER_TEST_DMA_ADDR
:
1175 ret
= r
->test_dma_addr
;
1177 case ROCKER_PORT_PHYS_LINK_STATUS
:
1178 ret
= rocker_port_phys_link_status(r
);
1180 case ROCKER_PORT_PHYS_ENABLE
:
1181 ret
= rocker_port_phys_enable_read(r
);
1183 case ROCKER_SWITCH_ID
:
1187 DPRINTF("not implemented read(q) addr=0x" TARGET_FMT_plx
"\n", addr
);
1194 static uint64_t rocker_mmio_read(void *opaque
, hwaddr addr
, unsigned size
)
1196 DPRINTF("Read %s addr " TARGET_FMT_plx
", size %u\n",
1197 rocker_reg_name(opaque
, addr
), addr
, size
);
1201 return rocker_io_readl(opaque
, addr
);
1203 return rocker_io_readq(opaque
, addr
);
1209 static const MemoryRegionOps rocker_mmio_ops
= {
1210 .read
= rocker_mmio_read
,
1211 .write
= rocker_mmio_write
,
1212 .endianness
= DEVICE_LITTLE_ENDIAN
,
1214 .min_access_size
= 4,
1215 .max_access_size
= 8,
1218 .min_access_size
= 4,
1219 .max_access_size
= 8,
1223 static void rocker_msix_vectors_unuse(Rocker
*r
,
1224 unsigned int num_vectors
)
1226 PCIDevice
*dev
= PCI_DEVICE(r
);
1229 for (i
= 0; i
< num_vectors
; i
++) {
1230 msix_vector_unuse(dev
, i
);
1234 static int rocker_msix_vectors_use(Rocker
*r
,
1235 unsigned int num_vectors
)
1237 PCIDevice
*dev
= PCI_DEVICE(r
);
1241 for (i
= 0; i
< num_vectors
; i
++) {
1242 err
= msix_vector_use(dev
, i
);
1250 rocker_msix_vectors_unuse(r
, i
);
1254 static int rocker_msix_init(Rocker
*r
)
1256 PCIDevice
*dev
= PCI_DEVICE(r
);
1259 err
= msix_init(dev
, ROCKER_MSIX_VEC_COUNT(r
->fp_ports
),
1261 ROCKER_PCI_MSIX_BAR_IDX
, ROCKER_PCI_MSIX_TABLE_OFFSET
,
1263 ROCKER_PCI_MSIX_BAR_IDX
, ROCKER_PCI_MSIX_PBA_OFFSET
,
1269 err
= rocker_msix_vectors_use(r
, ROCKER_MSIX_VEC_COUNT(r
->fp_ports
));
1271 goto err_msix_vectors_use
;
1276 err_msix_vectors_use
:
1277 msix_uninit(dev
, &r
->msix_bar
, &r
->msix_bar
);
1281 static void rocker_msix_uninit(Rocker
*r
)
1283 PCIDevice
*dev
= PCI_DEVICE(r
);
1285 msix_uninit(dev
, &r
->msix_bar
, &r
->msix_bar
);
1286 rocker_msix_vectors_unuse(r
, ROCKER_MSIX_VEC_COUNT(r
->fp_ports
));
1289 static int pci_rocker_init(PCIDevice
*dev
)
1291 Rocker
*r
= to_rocker(dev
);
1292 const MACAddr zero
= { .a
= { 0, 0, 0, 0, 0, 0 } };
1293 const MACAddr dflt
= { .a
= { 0x52, 0x54, 0x00, 0x12, 0x35, 0x01 } };
1294 static int sw_index
;
1297 /* allocate worlds */
1299 r
->worlds
[ROCKER_WORLD_TYPE_OF_DPA
] = of_dpa_world_alloc(r
);
1300 r
->world_dflt
= r
->worlds
[ROCKER_WORLD_TYPE_OF_DPA
];
1302 for (i
= 0; i
< ROCKER_WORLD_TYPE_MAX
; i
++) {
1303 if (!r
->worlds
[i
]) {
1304 goto err_world_alloc
;
1308 /* set up memory-mapped region at BAR0 */
1310 memory_region_init_io(&r
->mmio
, OBJECT(r
), &rocker_mmio_ops
, r
,
1311 "rocker-mmio", ROCKER_PCI_BAR0_SIZE
);
1312 pci_register_bar(dev
, ROCKER_PCI_BAR0_IDX
,
1313 PCI_BASE_ADDRESS_SPACE_MEMORY
, &r
->mmio
);
1315 /* set up memory-mapped region for MSI-X */
1317 memory_region_init(&r
->msix_bar
, OBJECT(r
), "rocker-msix-bar",
1318 ROCKER_PCI_MSIX_BAR_SIZE
);
1319 pci_register_bar(dev
, ROCKER_PCI_MSIX_BAR_IDX
,
1320 PCI_BASE_ADDRESS_SPACE_MEMORY
, &r
->msix_bar
);
1324 err
= rocker_msix_init(r
);
1329 /* validate switch properties */
1332 r
->name
= g_strdup(ROCKER
);
1335 if (rocker_find(r
->name
)) {
1340 /* Rocker name is passed in port name requests to OS with the intention
1341 * that the name is used in interface names. Limit the length of the
1342 * rocker name to avoid naming problems in the OS. Also, adding the
1343 * port number as p# and unganged breakout b#, where # is at most 2
1344 * digits, so leave room for it too (-1 for string terminator, -3 for
1347 #define ROCKER_IFNAMSIZ 16
1348 #define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3)
1349 if (strlen(r
->name
) > MAX_ROCKER_NAME_LEN
) {
1351 "rocker: name too long; please shorten to at most %d chars\n",
1352 MAX_ROCKER_NAME_LEN
);
1356 if (memcmp(&r
->fp_start_macaddr
, &zero
, sizeof(zero
)) == 0) {
1357 memcpy(&r
->fp_start_macaddr
, &dflt
, sizeof(dflt
));
1358 r
->fp_start_macaddr
.a
[4] += (sw_index
++);
1361 if (!r
->switch_id
) {
1362 memcpy(&r
->switch_id
, &r
->fp_start_macaddr
,
1363 sizeof(r
->fp_start_macaddr
));
1366 if (r
->fp_ports
> ROCKER_FP_PORTS_MAX
) {
1367 r
->fp_ports
= ROCKER_FP_PORTS_MAX
;
1370 r
->rings
= g_new(DescRing
*, rocker_pci_ring_count(r
));
1372 goto err_rings_alloc
;
1375 /* Rings are ordered like this:
1386 for (i
= 0; i
< rocker_pci_ring_count(r
); i
++) {
1387 DescRing
*ring
= desc_ring_alloc(r
, i
);
1390 goto err_ring_alloc
;
1393 if (i
== ROCKER_RING_CMD
) {
1394 desc_ring_set_consume(ring
, cmd_consume
, ROCKER_MSIX_VEC_CMD
);
1395 } else if (i
== ROCKER_RING_EVENT
) {
1396 desc_ring_set_consume(ring
, NULL
, ROCKER_MSIX_VEC_EVENT
);
1397 } else if (i
% 2 == 0) {
1398 desc_ring_set_consume(ring
, tx_consume
,
1399 ROCKER_MSIX_VEC_TX((i
- 2) / 2));
1400 } else if (i
% 2 == 1) {
1401 desc_ring_set_consume(ring
, NULL
, ROCKER_MSIX_VEC_RX((i
- 3) / 2));
1407 for (i
= 0; i
< r
->fp_ports
; i
++) {
1409 fp_port_alloc(r
, r
->name
, &r
->fp_start_macaddr
,
1410 i
, &r
->fp_ports_peers
[i
]);
1413 goto err_port_alloc
;
1416 r
->fp_port
[i
] = port
;
1417 fp_port_set_world(port
, r
->world_dflt
);
1420 QLIST_INSERT_HEAD(&rockers
, r
, next
);
1425 for (--i
; i
>= 0; i
--) {
1426 FpPort
*port
= r
->fp_port
[i
];
1429 i
= rocker_pci_ring_count(r
);
1431 for (--i
; i
>= 0; i
--) {
1432 desc_ring_free(r
->rings
[i
]);
1437 rocker_msix_uninit(r
);
1439 object_unparent(OBJECT(&r
->msix_bar
));
1440 object_unparent(OBJECT(&r
->mmio
));
1442 for (i
= 0; i
< ROCKER_WORLD_TYPE_MAX
; i
++) {
1444 world_free(r
->worlds
[i
]);
1450 static void pci_rocker_uninit(PCIDevice
*dev
)
1452 Rocker
*r
= to_rocker(dev
);
1455 QLIST_REMOVE(r
, next
);
1457 for (i
= 0; i
< r
->fp_ports
; i
++) {
1458 FpPort
*port
= r
->fp_port
[i
];
1461 r
->fp_port
[i
] = NULL
;
1464 for (i
= 0; i
< rocker_pci_ring_count(r
); i
++) {
1466 desc_ring_free(r
->rings
[i
]);
1471 rocker_msix_uninit(r
);
1472 object_unparent(OBJECT(&r
->msix_bar
));
1473 object_unparent(OBJECT(&r
->mmio
));
1475 for (i
= 0; i
< ROCKER_WORLD_TYPE_MAX
; i
++) {
1477 world_free(r
->worlds
[i
]);
1480 g_free(r
->fp_ports_peers
);
1483 static void rocker_reset(DeviceState
*dev
)
1485 Rocker
*r
= to_rocker(dev
);
1488 for (i
= 0; i
< ROCKER_WORLD_TYPE_MAX
; i
++) {
1490 world_reset(r
->worlds
[i
]);
1493 for (i
= 0; i
< r
->fp_ports
; i
++) {
1494 fp_port_reset(r
->fp_port
[i
]);
1495 fp_port_set_world(r
->fp_port
[i
], r
->world_dflt
);
1500 r
->test_dma_addr
= 0;
1501 r
->test_dma_size
= 0;
1503 for (i
= 0; i
< rocker_pci_ring_count(r
); i
++) {
1504 desc_ring_reset(r
->rings
[i
]);
1507 DPRINTF("Reset done\n");
1510 static Property rocker_properties
[] = {
1511 DEFINE_PROP_STRING("name", Rocker
, name
),
1512 DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker
,
1514 DEFINE_PROP_UINT64("switch_id", Rocker
,
1516 DEFINE_PROP_ARRAY("ports", Rocker
, fp_ports
,
1517 fp_ports_peers
, qdev_prop_netdev
, NICPeers
),
1518 DEFINE_PROP_END_OF_LIST(),
1521 static const VMStateDescription rocker_vmsd
= {
1526 static void rocker_class_init(ObjectClass
*klass
, void *data
)
1528 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1529 PCIDeviceClass
*k
= PCI_DEVICE_CLASS(klass
);
1531 k
->init
= pci_rocker_init
;
1532 k
->exit
= pci_rocker_uninit
;
1533 k
->vendor_id
= PCI_VENDOR_ID_REDHAT
;
1534 k
->device_id
= PCI_DEVICE_ID_REDHAT_ROCKER
;
1535 k
->revision
= ROCKER_PCI_REVISION
;
1536 k
->class_id
= PCI_CLASS_NETWORK_OTHER
;
1537 set_bit(DEVICE_CATEGORY_NETWORK
, dc
->categories
);
1538 dc
->desc
= "Rocker Switch";
1539 dc
->reset
= rocker_reset
;
1540 dc
->props
= rocker_properties
;
1541 dc
->vmsd
= &rocker_vmsd
;
1544 static const TypeInfo rocker_info
= {
1546 .parent
= TYPE_PCI_DEVICE
,
1547 .instance_size
= sizeof(Rocker
),
1548 .class_init
= rocker_class_init
,
1551 static void rocker_register_types(void)
1553 type_register_static(&rocker_info
);
1556 type_init(rocker_register_types
)