/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>
enum SpaprTceAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(, SpaprTceTable) spapr_tce_tables;
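
/*
 * Look up a TCE table by its LIOBN (logical I/O bus number).  LIOBNs
 * are 32-bit values, so a request with any of the upper 32 bits set
 * cannot match anything and is rejected up front.
 */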
SpaprTceTable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    SpaprTceTable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}
static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}
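
/*
 * Allocate the backing store for a TCE table.  Under KVM this first
 * tries to get an accelerated, kernel-managed table; if that fails
 * (or KVM is not in use) it falls back to a zero-filled userspace
 * allocation and marks the fd as invalid.
 */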
static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint64_t bus_offset,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;

    if (kvm_enabled()) {
        table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table,
                                        fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_new0(uint64_t, nb_table);
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}
static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}
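
/*
 * Translate one bus address (IOBA) into a guest physical address by
 * indexing the TCE table.  Addresses beyond the table, or entries with
 * no access bits set, come back with IOMMU_NONE permissions, which the
 * caller treats as a fault.
 */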
/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu,
                                               hwaddr addr,
                                               IOMMUAccessFlags flag,
                                               int iommu_idx)
{
    SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    /* Check that the address is within bounds */
    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}
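
/*
 * Re-send a mapping notification for every valid entry in the region,
 * walking it at the IOMMU's minimum page granularity.  This brings a
 * newly attached notifier up to date with the current table contents.
 */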
static void spapr_tce_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;
    SpaprTceTable *tcet = container_of(iommu_mr, SpaprTceTable, iommu);

    if (tcet->skipping_replay) {
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /*
         * If (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound.
         */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
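
/*
 * Migration: the live table is published through the mig_* fields at
 * pre-save time, so that vmstate can stream out an array of exactly
 * mig_nb_table entries.
 */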
static int spapr_tce_table_pre_save(void *opaque)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);

    return 0;
}
static uint64_t spapr_tce_get_min_page_size(IOMMUMemoryRegion *iommu)
{
    SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu);

    return 1ULL << tcet->page_shift;
}
static int spapr_tce_get_attr(IOMMUMemoryRegion *iommu,
                              enum IOMMUMemoryRegionAttr attr, void *data)
{
    SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu);

    if (attr == IOMMU_ATTR_SPAPR_TCE_FD && kvmppc_has_cap_spapr_vfio()) {
        *(int *) data = tcet->fd;
        return 0;
    }

    return -EINVAL;
}
static void spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    struct SpaprTceTable *tbl = container_of(iommu, SpaprTceTable, iommu);

    if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, true);
    } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, false);
    }
}
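
/*
 * On the destination, re-create the table if the migrated geometry
 * differs from the current one, then copy the incoming entries into
 * place and drop the temporary migration buffer.
 */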
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        g_free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}
static bool spapr_tce_table_ex_needed(void *opaque)
{
    SpaprTceTable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}
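
/*
 * Subsection carrying the table geometry; it is only sent when the
 * table deviates from the historical defaults (a zero bus offset and
 * 4K pages, i.e. a page shift of 0xC).
 */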
static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, SpaprTceTable),
        VMSTATE_UINT32(page_shift, SpaprTceTable),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, SpaprTceTable, NULL),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, SpaprTceTable),
        VMSTATE_BOOL(bypass, SpaprTceTable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, SpaprTceTable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};
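
/*
 * Realize creates an (initially empty) root container plus the IOMMU
 * region that spapr_tce_table_enable() later maps into it, and
 * registers the table for migration under its LIOBN.
 */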
static void spapr_tce_table_realize(DeviceState *dev, Error **errp)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    gchar *tmp;

    tcet->fd = -1;
    tcet->need_vfio = false;
    tmp = g_strdup_printf("tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);
    g_free(tmp);

    tmp = g_strdup_printf("tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, sizeof(tcet->iommu),
                             TYPE_SPAPR_IOMMU_MEMORY_REGION,
                             tcetobj, tmp, 0);
    g_free(tmp);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);
}
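
/*
 * VFIO needs the table in userspace memory unless the kernel has the
 * SPAPR VFIO capability.  When it does not, replace a KVM-managed
 * table with a userspace one, preserving its contents.  Turning VFIO
 * off only updates the flag; the table is left where it is.
 */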
void spapr_tce_set_need_vfio(SpaprTceTable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    uint64_t *oldtable;
    int newfd = -1;

    g_assert(need_vfio != tcet->need_vfio);

    tcet->need_vfio = need_vfio;

    if (!need_vfio || (tcet->fd != -1 && kvmppc_has_cap_spapr_vfio())) {
        return;
    }

    oldtable = tcet->table;

    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &newfd,
                                        need_vfio);
    memcpy(tcet->table, oldtable, table_size);

    spapr_tce_free_table(oldtable, tcet->fd, tcet->nb_table);

    tcet->fd = newfd;
}
SpaprTceTable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    SpaprTceTable *tcet;
    gchar *tmp;

    if (spapr_tce_find_by_liobn(liobn)) {
        error_report("Attempted to create TCE table with duplicate"
                     " LIOBN 0x%x", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    tmp = g_strdup_printf("tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);
    g_free(tmp);
    object_unref(OBJECT(tcet));

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}
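
/*
 * Enable/disable pair: enabling allocates the table and maps the IOMMU
 * region into the root container at the DMA window's bus offset;
 * disabling tears all of that down again and resets the geometry.
 */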
void spapr_tce_table_enable(SpaprTceTable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        warn_report("trying to enable already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(MEMORY_REGION(&tcet->iommu),
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset,
                                MEMORY_REGION(&tcet->iommu));
}
void spapr_tce_table_disable(SpaprTceTable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, MEMORY_REGION(&tcet->iommu));
    memory_region_set_size(MEMORY_REGION(&tcet->iommu), 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}
static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev);

    vmstate_unregister(DEVICE(tcet), &vmstate_spapr_tce_table, tcet);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}
MemoryRegion *spapr_tce_get_iommu(SpaprTceTable *tcet)
{
    return &tcet->root;
}
static void spapr_tce_reset(DeviceState *dev)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    if (tcet->nb_table) {
        memset(tcet->table, 0, table_size);
    }
}
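
/*
 * Store one TCE and notify IOMMU listeners (for example vhost or VFIO,
 * via the notifier machinery) of the changed mapping.
 */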
static target_ulong put_tce_emu(SpaprTceTable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("put_tce_emu on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, 0, entry);

    return H_SUCCESS;
}
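
/*
 * H_PUT_TCE_INDIRECT: read up to 512 TCEs from a guest page at
 * tce_list (which must be 4K aligned, per SPAPR_TCE_PAGE_MASK) and
 * store them at consecutive IOBAs starting from args[1].
 */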
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace the last successful entry, or the first problematic one */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }
    return ret;
}
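
/*
 * H_STUFF_TCE: fill npages consecutive entries with the same TCE
 * value, typically used by guests to clear a range of mappings.
 */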
static target_ulong h_stuff_tce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}
static target_ulong h_put_tce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}
static target_ulong get_tce_emu(SpaprTceTable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("get_tce_emu on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}
static target_ulong h_get_tce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}
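
/*
 * Write a PAPR DMA window property into the device tree: five cells
 * holding the LIOBN, the 64-bit window address and the 64-bit window
 * size (whose upper cell is always zero here, since the size argument
 * is only 32 bits wide).
 */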
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      SpaprTceTable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}
static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;
    /* Reason: This is just an internal device for handling the hypercalls */
    dc->user_creatable = false;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}
static const TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SpaprTceTable),
    .class_init = spapr_tce_table_class_init,
};
static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = spapr_tce_translate_iommu;
    imrc->replay = spapr_tce_replay;
    imrc->get_min_page_size = spapr_tce_get_min_page_size;
    imrc->notify_flag_changed = spapr_tce_notify_flag_changed;
    imrc->get_attr = spapr_tce_get_attr;
}
static const TypeInfo spapr_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SPAPR_IOMMU_MEMORY_REGION,
    .class_init = spapr_iommu_memory_region_class_init,
};
static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
    type_register_static(&spapr_iommu_memory_region_info);
}

type_init(register_types);