/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>
enum SpaprTceAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))
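
/*
 * Illustrative example (not from the original source): with 4K IOMMU
 * pages (shift == 12), IOMMU_PAGE_SIZE(12) == 0x1000 and
 * IOMMU_PAGE_MASK(12) == ~0xFFFULL, i.e. the mask keeps the page-number
 * bits of an address and drops the in-page offset.
 */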

static QLIST_HEAD(, SpaprTceTable) spapr_tce_tables;

SpaprTceTable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    SpaprTceTable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}

static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}
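
/*
 * Illustrative note (not from the original source): the two low bits of
 * a TCE encode its access permissions, so e.g. a TCE of 0x2000003 maps
 * the page at real address 0x2000000 read-write, while 0x2000001 would
 * map it read-only.
 */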

static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint64_t bus_offset,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;

    if (kvm_enabled()) {
        table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table,
                                        fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_new0(uint64_t, nb_table);
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}

static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}

/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(IOMMUMemoryRegion *iommu,
                                               hwaddr addr,
                                               IOMMUAccessFlags flag,
                                               int iommu_idx)
{
    SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}
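
/*
 * Worked example (illustrative, assuming page_shift == 12): for
 * addr == 0x3456, index 0x3456 >> 12 == 3 selects table[3]; if that TCE
 * is 0x8000003, the result is iova == 0x3000, translated_addr ==
 * 0x8000000, addr_mask == 0xFFF and perm == IOMMU_RW.
 */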

static void spapr_tce_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;
    SpaprTceTable *tcet = container_of(iommu_mr, SpaprTceTable, iommu);

    if (tcet->skipping_replay) {
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /*
         * if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound.
         */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
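
/*
 * Illustrative note on the wraparound check above (not from the original
 * source): if the region extends to within one granularity of the top of
 * the 64-bit address space, "addr += granularity" eventually wraps past
 * zero, the loop condition stays true forever, and only the explicit
 * (addr + granularity) < addr test terminates the walk.
 */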

static int spapr_tce_table_pre_save(void *opaque)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);

    return 0;
}

static uint64_t spapr_tce_get_min_page_size(IOMMUMemoryRegion *iommu)
{
    SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu);

    return 1ULL << tcet->page_shift;
}

static int spapr_tce_get_attr(IOMMUMemoryRegion *iommu,
                              enum IOMMUMemoryRegionAttr attr, void *data)
{
    SpaprTceTable *tcet = container_of(iommu, SpaprTceTable, iommu);

    if (attr == IOMMU_ATTR_SPAPR_TCE_FD && kvmppc_has_cap_spapr_vfio()) {
        *(int *) data = tcet->fd;
        return 0;
    }

    return -EINVAL;
}

static void spapr_tce_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    struct SpaprTceTable *tbl = container_of(iommu, SpaprTceTable, iommu);

    if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, true);
    } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, false);
    }
}

static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}

static bool spapr_tce_table_ex_needed(void *opaque)
{
    SpaprTceTable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}

static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, SpaprTceTable),
        VMSTATE_UINT32(page_shift, SpaprTceTable),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, SpaprTceTable, NULL),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, SpaprTceTable),
        VMSTATE_BOOL(bypass, SpaprTceTable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, SpaprTceTable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};

static void spapr_tce_table_realize(DeviceState *dev, Error **errp)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    gchar *tmp;

    tcet->fd = -1;
    tcet->need_vfio = false;
    tmp = g_strdup_printf("tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);
    g_free(tmp);

    tmp = g_strdup_printf("tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, sizeof(tcet->iommu),
                             TYPE_SPAPR_IOMMU_MEMORY_REGION,
                             tcetobj, tmp, 0);
    g_free(tmp);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);
}

void spapr_tce_set_need_vfio(SpaprTceTable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    uint64_t *oldtable;
    int newfd = -1;

    g_assert(need_vfio != tcet->need_vfio);

    tcet->need_vfio = need_vfio;

    if (!need_vfio || (tcet->fd != -1 && kvmppc_has_cap_spapr_vfio())) {
        return;
    }

    oldtable = tcet->table;

    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &newfd,
                                        need_vfio);
    memcpy(tcet->table, oldtable, table_size);

    spapr_tce_free_table(oldtable, tcet->fd, tcet->nb_table);

    tcet->fd = newfd;
}

SpaprTceTable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    SpaprTceTable *tcet;
    gchar *tmp;

    if (spapr_tce_find_by_liobn(liobn)) {
        error_report("Attempted to create TCE table with duplicate"
                     " LIOBN 0x%x", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    tmp = g_strdup_printf("tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);
    g_free(tmp);
    object_unref(OBJECT(tcet));

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

void spapr_tce_table_enable(SpaprTceTable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        warn_report("trying to enable already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(MEMORY_REGION(&tcet->iommu),
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset,
                                MEMORY_REGION(&tcet->iommu));
}
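
/*
 * Usage sketch (illustrative values, not from the original source): a
 * 1GB DMA window of 4K pages starting at bus address 0 would be enabled
 * with:
 *
 *   spapr_tce_table_enable(tcet, 12, 0, 0x40000);
 *
 * i.e. 0x40000 TCEs of 1 << 12 bytes each cover 0x40000000 bytes.
 */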

void spapr_tce_table_disable(SpaprTceTable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, MEMORY_REGION(&tcet->iommu));
    memory_region_set_size(MEMORY_REGION(&tcet->iommu), 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}

static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev);

    vmstate_unregister(DEVICE(tcet), &vmstate_spapr_tce_table, tcet);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}

MemoryRegion *spapr_tce_get_iommu(SpaprTceTable *tcet)
{
    return &tcet->root;
}

static void spapr_tce_reset(DeviceState *dev)
{
    SpaprTceTable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    if (tcet->nb_table) {
        memset(tcet->table, 0, table_size);
    }
}

static target_ulong put_tce_emu(SpaprTceTable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, 0, entry);

    return H_SUCCESS;
}
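
/*
 * Worked example (illustrative, assuming page_shift == 12 and
 * bus_offset == 0): put_tce_emu(tcet, 0x5000, 0x12345003) writes
 * table[5] and notifies IOMMU listeners of a new read-write mapping of
 * IOVA 0x5000 to real page 0x12345000.
 */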

static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }

    return ret;
}

static target_ulong h_stuff_tce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }

    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }

    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}

static target_ulong get_tce_emu(SpaprTceTable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    SpaprTceTable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }

    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}

int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
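
/*
 * Illustrative example (values are hypothetical): for liobn 0x80000001,
 * window 0 and size 0x40000000, the property holds five big-endian cells
 * { 0x80000001, 0x0, 0x0, 0x0, 0x40000000 }: the LIOBN, then a 64-bit
 * bus address and a 64-bit size split into two cells each, matching the
 * "ibm,#dma-address-cells" and "ibm,#dma-size-cells" values set above.
 */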

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      SpaprTceTable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;
    /* Reason: This is just an internal device for handling the hypercalls */
    dc->user_creatable = false;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SpaprTceTable),
    .class_init = spapr_tce_table_class_init,
};

static void spapr_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = spapr_tce_translate_iommu;
    imrc->replay = spapr_tce_replay;
    imrc->get_min_page_size = spapr_tce_get_min_page_size;
    imrc->notify_flag_changed = spapr_tce_notify_flag_changed;
    imrc->get_attr = spapr_tce_get_attr;
}

static const TypeInfo spapr_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SPAPR_IOMMU_MEMORY_REGION,
    .class_init = spapr_iommu_memory_region_class_init,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
    type_register_static(&spapr_iommu_memory_region_info);
}

type_init(register_types);