spapr: Restore support for older PowerPC CPU cores
[qemu.git] / hw / ppc / spapr_iommu.c
blob e230bacae17e56ea7d369ab5933e2d39a4b66e74
/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

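/*
 * Every TCE table in the machine goes on this global list and is looked
 * up by LIOBN (Logical I/O Bus Number), the 32-bit handle the guest
 * passes to the TCE hypercalls to identify a particular DMA window.
 */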
static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}

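/*
 * The two low-order bits of a TCE encode its access permissions (see
 * enum sPAPRTCEAccess above); the remaining bits carry the real page
 * address. This helper maps those bits onto QEMU's IOMMUAccessFlags.
 */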
static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}

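/*
 * Allocate backing storage for a TCE table. With KVM available and a
 * DMA window that fits in 32 bits, the table is allocated by the kernel
 * (enabling in-kernel H_PUT_TCE handling) and mapped into QEMU;
 * otherwise, or if the kernel allocation fails, a plain userspace
 * allocation is used and *fd is left as -1.
 */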
static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;
    uint64_t window_size = (uint64_t)nb_table << page_shift;

    if (kvm_enabled() && !(window_size >> 32)) {
        table = kvmppc_create_spapr_tce(liobn, window_size, fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_malloc0(nb_table * sizeof(uint64_t));
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}

static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}

/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
                                               bool is_write)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* We are within the bounds of the table */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

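/*
 * Migration support: the table contents travel via the mig_table and
 * mig_nb_table copies captured in pre_save, so that post_load can
 * recreate the DMA window on the destination even when its geometry
 * differs from that of the freshly reset device.
 */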
static void spapr_tce_table_pre_save(void *opaque)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);
}

static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);

    return 1ULL << tcet->page_shift;
}

static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}

static bool spapr_tce_table_ex_needed(void *opaque)
{
    sPAPRTCETable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}

static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, sPAPRTCETable),
        VMSTATE_UINT32(page_shift, sPAPRTCETable),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, sPAPRTCETable),
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, sPAPRTCETable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};

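/*
 * Callbacks for the IOMMU memory region: translate() services emulated
 * DMA accesses, and get_min_page_size() lets users of the region (VFIO,
 * for instance) query the IOMMU translation granularity.
 */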
static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
    .get_min_page_size = spapr_tce_get_min_page_size,
};

static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    char tmp[32];

    tcet->fd = -1;
    tcet->need_vfio = false;
    snprintf(tmp, sizeof(tmp), "tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);

    snprintf(tmp, sizeof(tmp), "tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, tcetobj, &spapr_iommu_ops, tmp, 0);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}

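/*
 * With a VFIO device behind this window, TCE updates have to pass
 * through QEMU so they can be propagated to the host IOMMU, which rules
 * out the in-kernel accelerated table: if one is in use, copy its
 * contents to a userspace table and release the kernel one.
 */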
void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    void *newtable;

    if (need_vfio == tcet->need_vfio) {
        /* Nothing to do */
        return;
    }

    if (!need_vfio) {
        /* FIXME: We don't support transition back to KVM accelerated
         * TCEs yet */
        return;
    }

    tcet->need_vfio = true;

    if (tcet->fd < 0) {
        /* Table is already in userspace, nothing to do */
        return;
    }

    newtable = g_malloc(table_size);
    memcpy(newtable, tcet->table, table_size);

    kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);

    tcet->fd = -1;
    tcet->table = newtable;
}

sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    sPAPRTCETable *tcet;
    char tmp[32];

    if (spapr_tce_find_by_liobn(liobn)) {
        fprintf(stderr, "Attempted to create TCE table with duplicate"
                " LIOBN 0x%x\n", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

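/*
 * A table object is created once (spapr_tce_new_table) but its DMA
 * window can be enabled and disabled at runtime: enabling allocates the
 * backing table and maps the IOMMU region at bus_offset within the root
 * region; disabling undoes both and frees the table.
 */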
void spapr_tce_table_enable(sPAPRTCETable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        error_report("Warning: trying to enable already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(&tcet->iommu,
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset, &tcet->iommu);
}

void spapr_tce_table_disable(sPAPRTCETable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, &tcet->iommu);
    memory_region_set_size(&tcet->iommu, 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}

static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}

MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->root;
}

static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    memset(tcet->table, 0, table_size);
}

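/*
 * Store a single TCE and inform any IOMMU notifiers of the new mapping
 * via memory_region_notify_iommu(). Returns H_SUCCESS or H_PARAMETER,
 * as expected by the hypercall wrappers below.
 */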
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

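/*
 * PAPR TCE hypercall handlers. H_PUT_TCE_INDIRECT applies a list of up
 * to 512 TCEs, read from a page-aligned buffer in guest memory, to
 * consecutive IOBAs starting at args[1].
 */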
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }
    return ret;
}

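/*
 * H_STUFF_TCE writes the same TCE value into npages consecutive
 * entries, which the guest typically uses to clear a range of mappings.
 */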
static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }

    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}

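/* H_PUT_TCE: the single-entry form of the mapping hypercall. */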
static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}

static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}

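/*
 * Encode a DMA window into the device tree property the guest expects
 * (e.g. "ibm,my-dma-window"): five cells holding the LIOBN, the 64-bit
 * window address and the 64-bit window size, whose upper cell is always
 * zero here because the size argument is only 32 bits.
 */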
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

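/*
 * Registering the TCE hypercalls from class_init means it happens
 * exactly once, when the type is first initialized, rather than per
 * table instance.
 */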
static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);