/*
 * QEMU paravirtual RDMA
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "qemu/module.h"
19 #include "hw/pci/pci.h"
20 #include "hw/pci/pci_ids.h"
21 #include "hw/pci/msi.h"
22 #include "hw/pci/msix.h"
23 #include "hw/qdev-properties.h"
24 #include "hw/qdev-properties-system.h"
27 #include "monitor/monitor.h"
28 #include "hw/rdma/rdma.h"
30 #include "../rdma_rm.h"
31 #include "../rdma_backend.h"
32 #include "../rdma_utils.h"
34 #include <infiniband/verbs.h>
36 #include "standard-headers/rdma/vmw_pvrdma-abi.h"
37 #include "sysemu/runstate.h"
38 #include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h"
39 #include "pvrdma_qp_ops.h"
41 static Property pvrdma_dev_properties
[] = {
42 DEFINE_PROP_STRING("netdev", PVRDMADev
, backend_eth_device_name
),
43 DEFINE_PROP_STRING("ibdev", PVRDMADev
, backend_device_name
),
44 DEFINE_PROP_UINT8("ibport", PVRDMADev
, backend_port_num
, 1),
45 DEFINE_PROP_UINT64("dev-caps-max-mr-size", PVRDMADev
, dev_attr
.max_mr_size
,
47 DEFINE_PROP_INT32("dev-caps-max-qp", PVRDMADev
, dev_attr
.max_qp
, MAX_QP
),
48 DEFINE_PROP_INT32("dev-caps-max-cq", PVRDMADev
, dev_attr
.max_cq
, MAX_CQ
),
49 DEFINE_PROP_INT32("dev-caps-max-mr", PVRDMADev
, dev_attr
.max_mr
, MAX_MR
),
50 DEFINE_PROP_INT32("dev-caps-max-pd", PVRDMADev
, dev_attr
.max_pd
, MAX_PD
),
51 DEFINE_PROP_INT32("dev-caps-qp-rd-atom", PVRDMADev
, dev_attr
.max_qp_rd_atom
,
53 DEFINE_PROP_INT32("dev-caps-max-qp-init-rd-atom", PVRDMADev
,
54 dev_attr
.max_qp_init_rd_atom
, MAX_QP_INIT_RD_ATOM
),
55 DEFINE_PROP_INT32("dev-caps-max-ah", PVRDMADev
, dev_attr
.max_ah
, MAX_AH
),
56 DEFINE_PROP_INT32("dev-caps-max-srq", PVRDMADev
, dev_attr
.max_srq
, MAX_SRQ
),
57 DEFINE_PROP_CHR("mad-chardev", PVRDMADev
, mad_chr
),
58 DEFINE_PROP_END_OF_LIST(),
61 static void pvrdma_format_statistics(RdmaProvider
*obj
, GString
*buf
)
63 PVRDMADev
*dev
= PVRDMA_DEV(obj
);
64 PCIDevice
*pdev
= PCI_DEVICE(dev
);
66 g_string_append_printf(buf
, "%s, %x.%x\n",
67 pdev
->name
, PCI_SLOT(pdev
->devfn
),
68 PCI_FUNC(pdev
->devfn
));
69 g_string_append_printf(buf
, "\tcommands : %" PRId64
"\n",
71 g_string_append_printf(buf
, "\tregs_reads : %" PRId64
"\n",
72 dev
->stats
.regs_reads
);
73 g_string_append_printf(buf
, "\tregs_writes : %" PRId64
"\n",
74 dev
->stats
.regs_writes
);
75 g_string_append_printf(buf
, "\tuar_writes : %" PRId64
"\n",
76 dev
->stats
.uar_writes
);
77 g_string_append_printf(buf
, "\tinterrupts : %" PRId64
"\n",
78 dev
->stats
.interrupts
);
79 rdma_format_device_counters(&dev
->rdma_dev_res
, buf
);
82 static void free_dev_ring(PCIDevice
*pci_dev
, PvrdmaRing
*ring
,
85 pvrdma_ring_free(ring
);
86 rdma_pci_dma_unmap(pci_dev
, ring_state
, TARGET_PAGE_SIZE
);
89 static int init_dev_ring(PvrdmaRing
*ring
, PvrdmaRingState
**ring_state
,
90 const char *name
, PCIDevice
*pci_dev
,
91 dma_addr_t dir_addr
, uint32_t num_pages
)
94 int max_pages
, rc
= 0;
97 rdma_error_report("Ring pages count must be strictly positive");
102 * Make sure we can satisfy the requested number of pages in a single
103 * TARGET_PAGE_SIZE sized page table (taking into account that first entry
104 * is reserved for ring-state)
106 max_pages
= TARGET_PAGE_SIZE
/ sizeof(dma_addr_t
) - 1;
107 if (num_pages
> max_pages
) {
108 rdma_error_report("Maximum pages on a single directory must not exceed %d\n",
113 dir
= rdma_pci_dma_map(pci_dev
, dir_addr
, TARGET_PAGE_SIZE
);
115 rdma_error_report("Failed to map to page directory (ring %s)", name
);
120 /* We support only one page table for a ring */
121 tbl
= rdma_pci_dma_map(pci_dev
, dir
[0], TARGET_PAGE_SIZE
);
123 rdma_error_report("Failed to map to page table (ring %s)", name
);
128 *ring_state
= rdma_pci_dma_map(pci_dev
, tbl
[0], TARGET_PAGE_SIZE
);
130 rdma_error_report("Failed to map to ring state (ring %s)", name
);
134 /* RX ring is the second */
136 rc
= pvrdma_ring_init(ring
, name
, pci_dev
,
137 (PvrdmaRingState
*)*ring_state
,
138 (num_pages
- 1) * TARGET_PAGE_SIZE
/
139 sizeof(struct pvrdma_cqne
),
140 sizeof(struct pvrdma_cqne
),
141 (dma_addr_t
*)&tbl
[1], (dma_addr_t
)num_pages
- 1);
144 goto out_free_ring_state
;
150 rdma_pci_dma_unmap(pci_dev
, *ring_state
, TARGET_PAGE_SIZE
);
153 rdma_pci_dma_unmap(pci_dev
, tbl
, TARGET_PAGE_SIZE
);
156 rdma_pci_dma_unmap(pci_dev
, dir
, TARGET_PAGE_SIZE
);
162 static void free_dsr(PVRDMADev
*dev
)
164 PCIDevice
*pci_dev
= PCI_DEVICE(dev
);
166 if (!dev
->dsr_info
.dsr
) {
170 free_dev_ring(pci_dev
, &dev
->dsr_info
.async
,
171 dev
->dsr_info
.async_ring_state
);
173 free_dev_ring(pci_dev
, &dev
->dsr_info
.cq
, dev
->dsr_info
.cq_ring_state
);
175 rdma_pci_dma_unmap(pci_dev
, dev
->dsr_info
.req
,
176 sizeof(union pvrdma_cmd_req
));
178 rdma_pci_dma_unmap(pci_dev
, dev
->dsr_info
.rsp
,
179 sizeof(union pvrdma_cmd_resp
));
181 rdma_pci_dma_unmap(pci_dev
, dev
->dsr_info
.dsr
,
182 sizeof(struct pvrdma_device_shared_region
));
184 dev
->dsr_info
.dsr
= NULL
;
187 static int load_dsr(PVRDMADev
*dev
)
190 PCIDevice
*pci_dev
= PCI_DEVICE(dev
);
192 struct pvrdma_device_shared_region
*dsr
;
197 dev
->dsr_info
.dsr
= rdma_pci_dma_map(pci_dev
, dev
->dsr_info
.dma
,
198 sizeof(struct pvrdma_device_shared_region
));
199 if (!dev
->dsr_info
.dsr
) {
200 rdma_error_report("Failed to map to DSR");
206 dsr_info
= &dev
->dsr_info
;
209 /* Map to command slot */
210 dsr_info
->req
= rdma_pci_dma_map(pci_dev
, dsr
->cmd_slot_dma
,
211 sizeof(union pvrdma_cmd_req
));
212 if (!dsr_info
->req
) {
213 rdma_error_report("Failed to map to command slot address");
218 /* Map to response slot */
219 dsr_info
->rsp
= rdma_pci_dma_map(pci_dev
, dsr
->resp_slot_dma
,
220 sizeof(union pvrdma_cmd_resp
));
221 if (!dsr_info
->rsp
) {
222 rdma_error_report("Failed to map to response slot address");
227 /* Map to CQ notification ring */
228 rc
= init_dev_ring(&dsr_info
->cq
, &dsr_info
->cq_ring_state
, "dev_cq",
229 pci_dev
, dsr
->cq_ring_pages
.pdir_dma
,
230 dsr
->cq_ring_pages
.num_pages
);
236 /* Map to event notification ring */
237 rc
= init_dev_ring(&dsr_info
->async
, &dsr_info
->async_ring_state
,
238 "dev_async", pci_dev
, dsr
->async_ring_pages
.pdir_dma
,
239 dsr
->async_ring_pages
.num_pages
);
248 rdma_pci_dma_unmap(pci_dev
, dsr_info
->rsp
, sizeof(union pvrdma_cmd_resp
));
251 rdma_pci_dma_unmap(pci_dev
, dsr_info
->req
, sizeof(union pvrdma_cmd_req
));
254 rdma_pci_dma_unmap(pci_dev
, dsr_info
->dsr
,
255 sizeof(struct pvrdma_device_shared_region
));
256 dsr_info
->dsr
= NULL
;
262 static void init_dsr_dev_caps(PVRDMADev
*dev
)
264 struct pvrdma_device_shared_region
*dsr
;
266 if (!dev
->dsr_info
.dsr
) {
267 /* Buggy or malicious guest driver */
268 rdma_error_report("Can't initialized DSR");
272 dsr
= dev
->dsr_info
.dsr
;
273 dsr
->caps
.fw_ver
= PVRDMA_FW_VERSION
;
274 dsr
->caps
.mode
= PVRDMA_DEVICE_MODE_ROCE
;
275 dsr
->caps
.gid_types
|= PVRDMA_GID_TYPE_FLAG_ROCE_V1
;
276 dsr
->caps
.max_uar
= RDMA_BAR2_UAR_SIZE
;
277 dsr
->caps
.max_mr_size
= dev
->dev_attr
.max_mr_size
;
278 dsr
->caps
.max_qp
= dev
->dev_attr
.max_qp
;
279 dsr
->caps
.max_qp_wr
= dev
->dev_attr
.max_qp_wr
;
280 dsr
->caps
.max_sge
= dev
->dev_attr
.max_sge
;
281 dsr
->caps
.max_cq
= dev
->dev_attr
.max_cq
;
282 dsr
->caps
.max_cqe
= dev
->dev_attr
.max_cqe
;
283 dsr
->caps
.max_mr
= dev
->dev_attr
.max_mr
;
284 dsr
->caps
.max_pd
= dev
->dev_attr
.max_pd
;
285 dsr
->caps
.max_ah
= dev
->dev_attr
.max_ah
;
286 dsr
->caps
.max_srq
= dev
->dev_attr
.max_srq
;
287 dsr
->caps
.max_srq_wr
= dev
->dev_attr
.max_srq_wr
;
288 dsr
->caps
.max_srq_sge
= dev
->dev_attr
.max_srq_sge
;
289 dsr
->caps
.gid_tbl_len
= MAX_GIDS
;
290 dsr
->caps
.sys_image_guid
= 0;
291 dsr
->caps
.node_guid
= dev
->node_guid
;
292 dsr
->caps
.phys_port_cnt
= MAX_PORTS
;
293 dsr
->caps
.max_pkeys
= MAX_PKEYS
;
296 static void uninit_msix(PCIDevice
*pdev
, int used_vectors
)
298 PVRDMADev
*dev
= PVRDMA_DEV(pdev
);
301 for (i
= 0; i
< used_vectors
; i
++) {
302 msix_vector_unuse(pdev
, i
);
305 msix_uninit(pdev
, &dev
->msix
, &dev
->msix
);
308 static int init_msix(PCIDevice
*pdev
)
310 PVRDMADev
*dev
= PVRDMA_DEV(pdev
);
314 rc
= msix_init(pdev
, RDMA_MAX_INTRS
, &dev
->msix
, RDMA_MSIX_BAR_IDX
,
315 RDMA_MSIX_TABLE
, &dev
->msix
, RDMA_MSIX_BAR_IDX
,
316 RDMA_MSIX_PBA
, 0, NULL
);
319 rdma_error_report("Failed to initialize MSI-X");
323 for (i
= 0; i
< RDMA_MAX_INTRS
; i
++) {
324 msix_vector_use(PCI_DEVICE(dev
), i
);
330 static void pvrdma_fini(PCIDevice
*pdev
)
332 PVRDMADev
*dev
= PVRDMA_DEV(pdev
);
334 notifier_remove(&dev
->shutdown_notifier
);
336 pvrdma_qp_ops_fini();
338 rdma_backend_stop(&dev
->backend_dev
);
340 rdma_rm_fini(&dev
->rdma_dev_res
, &dev
->backend_dev
,
341 dev
->backend_eth_device_name
);
343 rdma_backend_fini(&dev
->backend_dev
);
347 if (msix_enabled(pdev
)) {
348 uninit_msix(pdev
, RDMA_MAX_INTRS
);
351 rdma_info_report("Device %s %x.%x is down", pdev
->name
,
352 PCI_SLOT(pdev
->devfn
), PCI_FUNC(pdev
->devfn
));
355 static void pvrdma_stop(PVRDMADev
*dev
)
357 rdma_backend_stop(&dev
->backend_dev
);
360 static void pvrdma_start(PVRDMADev
*dev
)
362 rdma_backend_start(&dev
->backend_dev
);
365 static void activate_device(PVRDMADev
*dev
)
368 set_reg_val(dev
, PVRDMA_REG_ERR
, 0);
371 static int unquiesce_device(PVRDMADev
*dev
)
376 static void reset_device(PVRDMADev
*dev
)
381 static uint64_t pvrdma_regs_read(void *opaque
, hwaddr addr
, unsigned size
)
383 PVRDMADev
*dev
= opaque
;
386 dev
->stats
.regs_reads
++;
388 if (get_reg_val(dev
, addr
, &val
)) {
389 rdma_error_report("Failed to read REG value from address 0x%x",
394 trace_pvrdma_regs_read(addr
, val
);
399 static void pvrdma_regs_write(void *opaque
, hwaddr addr
, uint64_t val
,
402 PVRDMADev
*dev
= opaque
;
404 dev
->stats
.regs_writes
++;
406 if (set_reg_val(dev
, addr
, val
)) {
407 rdma_error_report("Failed to set REG value, addr=0x%"PRIx64
", val=0x%"PRIx64
,
413 case PVRDMA_REG_DSRLOW
:
414 trace_pvrdma_regs_write(addr
, val
, "DSRLOW", "");
415 dev
->dsr_info
.dma
= val
;
417 case PVRDMA_REG_DSRHIGH
:
418 trace_pvrdma_regs_write(addr
, val
, "DSRHIGH", "");
419 dev
->dsr_info
.dma
|= val
<< 32;
421 init_dsr_dev_caps(dev
);
425 case PVRDMA_DEVICE_CTL_ACTIVATE
:
426 trace_pvrdma_regs_write(addr
, val
, "CTL", "ACTIVATE");
427 activate_device(dev
);
429 case PVRDMA_DEVICE_CTL_UNQUIESCE
:
430 trace_pvrdma_regs_write(addr
, val
, "CTL", "UNQUIESCE");
431 unquiesce_device(dev
);
433 case PVRDMA_DEVICE_CTL_RESET
:
434 trace_pvrdma_regs_write(addr
, val
, "CTL", "URESET");
440 trace_pvrdma_regs_write(addr
, val
, "INTR_MASK", "");
441 dev
->interrupt_mask
= val
;
443 case PVRDMA_REG_REQUEST
:
445 trace_pvrdma_regs_write(addr
, val
, "REQUEST", "");
446 pvrdma_exec_cmd(dev
);
454 static const MemoryRegionOps regs_ops
= {
455 .read
= pvrdma_regs_read
,
456 .write
= pvrdma_regs_write
,
457 .endianness
= DEVICE_LITTLE_ENDIAN
,
459 .min_access_size
= sizeof(uint32_t),
460 .max_access_size
= sizeof(uint32_t),
464 static uint64_t pvrdma_uar_read(void *opaque
, hwaddr addr
, unsigned size
)
469 static void pvrdma_uar_write(void *opaque
, hwaddr addr
, uint64_t val
,
472 PVRDMADev
*dev
= opaque
;
474 dev
->stats
.uar_writes
++;
476 switch (addr
& 0xFFF) { /* Mask with 0xFFF as each UC gets page */
477 case PVRDMA_UAR_QP_OFFSET
:
478 if (val
& PVRDMA_UAR_QP_SEND
) {
479 trace_pvrdma_uar_write(addr
, val
, "QP", "SEND",
480 val
& PVRDMA_UAR_HANDLE_MASK
, 0);
481 pvrdma_qp_send(dev
, val
& PVRDMA_UAR_HANDLE_MASK
);
483 if (val
& PVRDMA_UAR_QP_RECV
) {
484 trace_pvrdma_uar_write(addr
, val
, "QP", "RECV",
485 val
& PVRDMA_UAR_HANDLE_MASK
, 0);
486 pvrdma_qp_recv(dev
, val
& PVRDMA_UAR_HANDLE_MASK
);
489 case PVRDMA_UAR_CQ_OFFSET
:
490 if (val
& PVRDMA_UAR_CQ_ARM
) {
491 trace_pvrdma_uar_write(addr
, val
, "CQ", "ARM",
492 val
& PVRDMA_UAR_HANDLE_MASK
,
493 !!(val
& PVRDMA_UAR_CQ_ARM_SOL
));
494 rdma_rm_req_notify_cq(&dev
->rdma_dev_res
,
495 val
& PVRDMA_UAR_HANDLE_MASK
,
496 !!(val
& PVRDMA_UAR_CQ_ARM_SOL
));
498 if (val
& PVRDMA_UAR_CQ_ARM_SOL
) {
499 trace_pvrdma_uar_write(addr
, val
, "CQ", "ARMSOL - not supported", 0,
502 if (val
& PVRDMA_UAR_CQ_POLL
) {
503 trace_pvrdma_uar_write(addr
, val
, "CQ", "POLL",
504 val
& PVRDMA_UAR_HANDLE_MASK
, 0);
505 pvrdma_cq_poll(&dev
->rdma_dev_res
, val
& PVRDMA_UAR_HANDLE_MASK
);
508 case PVRDMA_UAR_SRQ_OFFSET
:
509 if (val
& PVRDMA_UAR_SRQ_RECV
) {
510 trace_pvrdma_uar_write(addr
, val
, "QP", "SRQ",
511 val
& PVRDMA_UAR_HANDLE_MASK
, 0);
512 pvrdma_srq_recv(dev
, val
& PVRDMA_UAR_HANDLE_MASK
);
516 rdma_error_report("Unsupported command, addr=0x%"PRIx64
", val=0x%"PRIx64
,
522 static const MemoryRegionOps uar_ops
= {
523 .read
= pvrdma_uar_read
,
524 .write
= pvrdma_uar_write
,
525 .endianness
= DEVICE_LITTLE_ENDIAN
,
527 .min_access_size
= sizeof(uint32_t),
528 .max_access_size
= sizeof(uint32_t),
532 static void init_pci_config(PCIDevice
*pdev
)
534 pdev
->config
[PCI_INTERRUPT_PIN
] = 1;
537 static void init_bars(PCIDevice
*pdev
)
539 PVRDMADev
*dev
= PVRDMA_DEV(pdev
);
542 memory_region_init(&dev
->msix
, OBJECT(dev
), "pvrdma-msix",
543 RDMA_BAR0_MSIX_SIZE
);
544 pci_register_bar(pdev
, RDMA_MSIX_BAR_IDX
, PCI_BASE_ADDRESS_SPACE_MEMORY
,
547 /* BAR 1 - Registers */
548 memset(&dev
->regs_data
, 0, sizeof(dev
->regs_data
));
549 memory_region_init_io(&dev
->regs
, OBJECT(dev
), ®s_ops
, dev
,
550 "pvrdma-regs", sizeof(dev
->regs_data
));
551 pci_register_bar(pdev
, RDMA_REG_BAR_IDX
, PCI_BASE_ADDRESS_SPACE_MEMORY
,
555 memset(&dev
->uar_data
, 0, sizeof(dev
->uar_data
));
556 memory_region_init_io(&dev
->uar
, OBJECT(dev
), &uar_ops
, dev
, "rdma-uar",
557 sizeof(dev
->uar_data
));
558 pci_register_bar(pdev
, RDMA_UAR_BAR_IDX
, PCI_BASE_ADDRESS_SPACE_MEMORY
,
562 static void init_regs(PCIDevice
*pdev
)
564 PVRDMADev
*dev
= PVRDMA_DEV(pdev
);
566 set_reg_val(dev
, PVRDMA_REG_VERSION
, PVRDMA_HW_VERSION
);
567 set_reg_val(dev
, PVRDMA_REG_ERR
, 0xFFFF);
570 static void init_dev_caps(PVRDMADev
*dev
)
572 size_t pg_tbl_bytes
= TARGET_PAGE_SIZE
*
573 (TARGET_PAGE_SIZE
/ sizeof(uint64_t));
574 size_t wr_sz
= MAX(sizeof(struct pvrdma_sq_wqe_hdr
),
575 sizeof(struct pvrdma_rq_wqe_hdr
));
577 dev
->dev_attr
.max_qp_wr
= pg_tbl_bytes
/
578 (wr_sz
+ sizeof(struct pvrdma_sge
) *
579 dev
->dev_attr
.max_sge
) - TARGET_PAGE_SIZE
;
580 /* First page is ring state ^^^^ */
582 dev
->dev_attr
.max_cqe
= pg_tbl_bytes
/ sizeof(struct pvrdma_cqe
) -
583 TARGET_PAGE_SIZE
; /* First page is ring state */
585 dev
->dev_attr
.max_srq_wr
= pg_tbl_bytes
/
586 ((sizeof(struct pvrdma_rq_wqe_hdr
) +
587 sizeof(struct pvrdma_sge
)) *
588 dev
->dev_attr
.max_sge
) - TARGET_PAGE_SIZE
;
591 static int pvrdma_check_ram_shared(Object
*obj
, void *opaque
)
593 bool *shared
= opaque
;
595 if (object_dynamic_cast(obj
, "memory-backend-ram")) {
596 *shared
= object_property_get_bool(obj
, "share", NULL
);
602 static void pvrdma_shutdown_notifier(Notifier
*n
, void *opaque
)
604 PVRDMADev
*dev
= container_of(n
, PVRDMADev
, shutdown_notifier
);
605 PCIDevice
*pci_dev
= PCI_DEVICE(dev
);
607 pvrdma_fini(pci_dev
);
610 static void pvrdma_realize(PCIDevice
*pdev
, Error
**errp
)
613 PVRDMADev
*dev
= PVRDMA_DEV(pdev
);
615 bool ram_shared
= false;
618 warn_report_once("pvrdma is deprecated and will be removed in a future release");
620 rdma_info_report("Initializing device %s %x.%x", pdev
->name
,
621 PCI_SLOT(pdev
->devfn
), PCI_FUNC(pdev
->devfn
));
623 if (TARGET_PAGE_SIZE
!= qemu_real_host_page_size()) {
624 error_setg(errp
, "Target page size must be the same as host page size");
628 func0
= pci_get_function_0(pdev
);
629 /* Break if not vmxnet3 device in slot 0 */
630 if (strcmp(object_get_typename(OBJECT(func0
)), TYPE_VMXNET3
)) {
631 error_setg(errp
, "Device on %x.0 must be %s", PCI_SLOT(pdev
->devfn
),
635 dev
->func0
= VMXNET3(func0
);
637 addrconf_addr_eui48((unsigned char *)&dev
->node_guid
,
638 (const char *)&dev
->func0
->conf
.macaddr
.a
);
640 memdev_root
= object_resolve_path("/objects", NULL
);
642 object_child_foreach(memdev_root
, pvrdma_check_ram_shared
, &ram_shared
);
645 error_setg(errp
, "Only shared memory backed ram is supported");
649 dev
->dsr_info
.dsr
= NULL
;
651 init_pci_config(pdev
);
657 rc
= init_msix(pdev
);
662 rc
= rdma_backend_init(&dev
->backend_dev
, pdev
, &dev
->rdma_dev_res
,
663 dev
->backend_device_name
, dev
->backend_port_num
,
664 &dev
->dev_attr
, &dev
->mad_chr
);
671 rc
= rdma_rm_init(&dev
->rdma_dev_res
, &dev
->dev_attr
);
676 rc
= pvrdma_qp_ops_init();
681 memset(&dev
->stats
, 0, sizeof(dev
->stats
));
683 dev
->shutdown_notifier
.notify
= pvrdma_shutdown_notifier
;
684 qemu_register_shutdown_notifier(&dev
->shutdown_notifier
);
686 #ifdef LEGACY_RDMA_REG_MR
687 rdma_info_report("Using legacy reg_mr");
689 rdma_info_report("Using iova reg_mr");
695 error_append_hint(errp
, "Device failed to load\n");
699 static void pvrdma_class_init(ObjectClass
*klass
, void *data
)
701 DeviceClass
*dc
= DEVICE_CLASS(klass
);
702 PCIDeviceClass
*k
= PCI_DEVICE_CLASS(klass
);
703 RdmaProviderClass
*ir
= RDMA_PROVIDER_CLASS(klass
);
705 k
->realize
= pvrdma_realize
;
706 k
->vendor_id
= PCI_VENDOR_ID_VMWARE
;
707 k
->device_id
= PCI_DEVICE_ID_VMWARE_PVRDMA
;
709 k
->class_id
= PCI_CLASS_NETWORK_OTHER
;
711 dc
->desc
= "RDMA Device";
712 device_class_set_props(dc
, pvrdma_dev_properties
);
713 set_bit(DEVICE_CATEGORY_NETWORK
, dc
->categories
);
715 ir
->format_statistics
= pvrdma_format_statistics
;
718 static const TypeInfo pvrdma_info
= {
719 .name
= PVRDMA_HW_NAME
,
720 .parent
= TYPE_PCI_DEVICE
,
721 .instance_size
= sizeof(PVRDMADev
),
722 .class_init
= pvrdma_class_init
,
723 .interfaces
= (InterfaceInfo
[]) {
724 { INTERFACE_CONVENTIONAL_PCI_DEVICE
},
725 { INTERFACE_RDMA_PROVIDER
},
730 static void register_types(void)
732 type_register_static(&pvrdma_info
);
735 type_init(register_types
)