spapr_vio/spapr_iommu: Move VIO bypass where it belongs
qemu/ar7.git: hw/ppc/spapr_iommu.c (blob f3990fdc325385553f7660b516afced001c3554c)
/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/hw.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>
enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))
static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
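
/*
 * Look up a TCE table by its LIOBN (Logical I/O Bus Number). The hcall
 * ABI passes the LIOBN as a target_ulong, so a value with any of the
 * upper 32 bits set is rejected before scanning the global list.
 */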
static sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == liobn) {
            return tcet;
        }
    }

    return NULL;
}
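
/*
 * IOMMU translate callback: resolves a bus address within the TCE
 * window to a guest physical address. Each 64-bit TCE holds the real
 * page address in the upper bits and the read/write permission bits
 * (IOMMU_RW) in the low bits; an out-of-range address or a cleared
 * entry yields IOMMU_NONE, i.e. a fault.
 */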
/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
                                               bool is_write)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check that the address is within bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = tce & IOMMU_RW;
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}
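
/*
 * The bypass flag stays in the TCE table's migration stream, but the
 * bypass mechanism itself now lives in the VIO device (hence this
 * commit's subject): after an incoming migration, push the restored
 * flag back into the owning VIO device, if there is one.
 */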
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    return 0;
}
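
/*
 * Migration stream layout: liobn and nb_table act as sanity checks
 * (they must match on the destination), followed by the bypass flag
 * and the TCE entries themselves.
 */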
static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
        VMSTATE_UINT32_EQUAL(nb_table, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0,
                              vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
};
static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
};
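
/*
 * Under KVM, first try to have the kernel allocate the table so that
 * the TCE hypercalls can be handled in-kernel; fall back to a plain
 * userspace allocation (and full emulation) if that fails or KVM is
 * not enabled.
 */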
static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    if (kvm_enabled()) {
        tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
                                              tcet->nb_table <<
                                              tcet->page_shift,
                                              &tcet->fd,
                                              tcet->vfio_accel);
    }

    if (!tcet->table) {
        size_t table_size = tcet->nb_table * sizeof(uint64_t);
        tcet->table = g_malloc0(table_size);
    }

    trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);

    memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
                             "iommu-spapr",
                             (uint64_t)tcet->nb_table << tcet->page_shift);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}
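
/*
 * Create and realize a new TCE table. LIOBNs must be unique, and an
 * empty window (nb_table == 0) is rejected. The table becomes a QOM
 * child of its owner device.
 */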
sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
                                   uint64_t bus_offset,
                                   uint32_t page_shift,
                                   uint32_t nb_table,
                                   bool vfio_accel)
{
    sPAPRTCETable *tcet;

    if (spapr_tce_find_by_liobn(liobn)) {
        fprintf(stderr, "Attempted to create TCE table with duplicate"
                " LIOBN 0x%x\n", liobn);
        return NULL;
    }

    if (!nb_table) {
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;
    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->vfio_accel = vfio_accel;

    object_property_add_child(OBJECT(owner), "tce-table", OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}
static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
                                 tcet->nb_table) != 0)) {
        g_free(tcet->table);
    }
}
MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->iommu;
}
static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    memset(tcet->table, 0, table_size);
}
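
/*
 * Write a single TCE at the given IOBA (I/O Bus Address) and notify
 * any IOMMU listeners (e.g. VFIO) of the new mapping so they can
 * update their shadow tables.
 */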
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = ioba & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = tce & IOMMU_RW;
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}
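
/*
 * H_PUT_TCE_INDIRECT: the guest passes a page-aligned list of up to
 * 512 TCEs in memory rather than one TCE per hcall, amortising the
 * hcall cost for large mappings.
 */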
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPREnvironment *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        target_ulong off = (tce_list & ~SPAPR_TCE_RW) +
                                i * sizeof(target_ulong);
        target_ulong tce = ldq_phys(cs->as, off);

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
                               ldq_phys(cs->as,
                               tce_list + i * sizeof(target_ulong)),
                               ret);

    return ret;
}
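
/*
 * H_STUFF_TCE: store the same TCE value into npages consecutive
 * entries, typically used to clear a range of mappings in one hcall.
 */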
static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }

    trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);

    return ret;
}
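
/* H_PUT_TCE: map (or unmap) a single page in the TCE table. */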
static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    trace_spapr_iommu_put(liobn, ioba, tce, ret);

    return ret;
}
static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}
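
/* H_GET_TCE: read back a single TCE, returned to the guest in args[0]. */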
static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    trace_spapr_iommu_get(liobn, ioba, ret, tce);

    return ret;
}
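
/*
 * Emit a PAPR DMA window property (e.g. "ibm,my-dma-window") into the
 * device tree: five be32 cells encoding the LIOBN, the 64-bit window
 * address and the 64-bit window size (whose high cell is always zero
 * here, since the size argument is 32 bits wide).
 */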
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}
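
/*
 * Note: the TCE hypercalls are registered from class_init rather than
 * per instance, so they are installed exactly once, when the class is
 * first initialised.
 */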
static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}
static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};
static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);