/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "hw/boards.h"
21 #include "sysemu/sysemu.h"
22 #include "hw/sysbus.h"
23 #include "hw/qdev-core.h"
24 #include "hw/pci/pci.h"
25 #include "exec/address-spaces.h"
28 #include "qemu/error-report.h"
29 #include "qapi/error.h"
31 #include "hw/arm/smmuv3.h"
32 #include "smmuv3-internal.h"
35 * smmuv3_trigger_irq - pulse @irq if enabled and update
36 * GERROR register in case of GERROR interrupt
39 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
41 void smmuv3_trigger_irq(SMMUv3State
*s
, SMMUIrq irq
, uint32_t gerror_mask
)
48 pulse
= smmuv3_eventq_irq_enabled(s
);
51 qemu_log_mask(LOG_UNIMP
, "PRI not yet supported\n");
53 case SMMU_IRQ_CMD_SYNC
:
58 uint32_t pending
= s
->gerror
^ s
->gerrorn
;
59 uint32_t new_gerrors
= ~pending
& gerror_mask
;
62 /* only toggle non pending errors */
65 s
->gerror
^= new_gerrors
;
66 trace_smmuv3_write_gerror(new_gerrors
, s
->gerror
);
68 pulse
= smmuv3_gerror_irq_enabled(s
);
73 trace_smmuv3_trigger_irq(irq
);
74 qemu_irq_pulse(s
->irq
[irq
]);
78 void smmuv3_write_gerrorn(SMMUv3State
*s
, uint32_t new_gerrorn
)
80 uint32_t pending
= s
->gerror
^ s
->gerrorn
;
81 uint32_t toggled
= s
->gerrorn
^ new_gerrorn
;
83 if (toggled
& ~pending
) {
84 qemu_log_mask(LOG_GUEST_ERROR
,
85 "guest toggles non pending errors = 0x%x\n",
90 * We do not raise any error in case guest toggles bits corresponding
91 * to not active IRQs (CONSTRAINED UNPREDICTABLE)
93 s
->gerrorn
= new_gerrorn
;
95 trace_smmuv3_write_gerrorn(toggled
& pending
, s
->gerrorn
);
98 static void smmuv3_init_regs(SMMUv3State
*s
)
101 * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
102 * multi-level stream table
104 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, S1P
, 1); /* stage 1 supported */
105 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, TTF
, 2); /* AArch64 PTW only */
106 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, COHACC
, 1); /* IO coherent */
107 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, ASID16
, 1); /* 16-bit ASID */
108 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, TTENDIAN
, 2); /* little endian */
109 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, STALL_MODEL
, 1); /* No stall */
110 /* terminated transaction will always be aborted/error returned */
111 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, TERM_MODEL
, 1);
112 /* 2-level stream table supported */
113 s
->idr
[0] = FIELD_DP32(s
->idr
[0], IDR0
, STLEVEL
, 1);
115 s
->idr
[1] = FIELD_DP32(s
->idr
[1], IDR1
, SIDSIZE
, SMMU_IDR1_SIDSIZE
);
116 s
->idr
[1] = FIELD_DP32(s
->idr
[1], IDR1
, EVENTQS
, SMMU_EVENTQS
);
117 s
->idr
[1] = FIELD_DP32(s
->idr
[1], IDR1
, CMDQS
, SMMU_CMDQS
);
119 /* 4K and 64K granule support */
120 s
->idr
[5] = FIELD_DP32(s
->idr
[5], IDR5
, GRAN4K
, 1);
121 s
->idr
[5] = FIELD_DP32(s
->idr
[5], IDR5
, GRAN64K
, 1);
122 s
->idr
[5] = FIELD_DP32(s
->idr
[5], IDR5
, OAS
, SMMU_IDR5_OAS
); /* 44 bits */
124 s
->cmdq
.base
= deposit64(s
->cmdq
.base
, 0, 5, SMMU_CMDQS
);
127 s
->cmdq
.entry_size
= sizeof(struct Cmd
);
128 s
->eventq
.base
= deposit64(s
->eventq
.base
, 0, 5, SMMU_EVENTQS
);
131 s
->eventq
.entry_size
= sizeof(struct Evt
);
137 static MemTxResult
smmu_write_mmio(void *opaque
, hwaddr offset
, uint64_t data
,
138 unsigned size
, MemTxAttrs attrs
)
140 /* not yet implemented */
144 static MemTxResult
smmu_readll(SMMUv3State
*s
, hwaddr offset
,
145 uint64_t *data
, MemTxAttrs attrs
)
148 case A_GERROR_IRQ_CFG0
:
149 *data
= s
->gerror_irq_cfg0
;
152 *data
= s
->strtab_base
;
155 *data
= s
->cmdq
.base
;
158 *data
= s
->eventq
.base
;
162 qemu_log_mask(LOG_UNIMP
,
163 "%s Unexpected 64-bit access to 0x%"PRIx64
" (RAZ)\n",
169 static MemTxResult
smmu_readl(SMMUv3State
*s
, hwaddr offset
,
170 uint64_t *data
, MemTxAttrs attrs
)
173 case A_IDREGS
... A_IDREGS
+ 0x1f:
174 *data
= smmuv3_idreg(offset
- A_IDREGS
);
176 case A_IDR0
... A_IDR5
:
177 *data
= s
->idr
[(offset
- A_IDR0
) / 4];
207 case A_GERROR_IRQ_CFG0
: /* 64b */
208 *data
= extract64(s
->gerror_irq_cfg0
, 0, 32);
210 case A_GERROR_IRQ_CFG0
+ 4:
211 *data
= extract64(s
->gerror_irq_cfg0
, 32, 32);
213 case A_GERROR_IRQ_CFG1
:
214 *data
= s
->gerror_irq_cfg1
;
216 case A_GERROR_IRQ_CFG2
:
217 *data
= s
->gerror_irq_cfg2
;
219 case A_STRTAB_BASE
: /* 64b */
220 *data
= extract64(s
->strtab_base
, 0, 32);
222 case A_STRTAB_BASE
+ 4: /* 64b */
223 *data
= extract64(s
->strtab_base
, 32, 32);
225 case A_STRTAB_BASE_CFG
:
226 *data
= s
->strtab_base_cfg
;
228 case A_CMDQ_BASE
: /* 64b */
229 *data
= extract64(s
->cmdq
.base
, 0, 32);
231 case A_CMDQ_BASE
+ 4:
232 *data
= extract64(s
->cmdq
.base
, 32, 32);
235 *data
= s
->cmdq
.prod
;
238 *data
= s
->cmdq
.cons
;
240 case A_EVENTQ_BASE
: /* 64b */
241 *data
= extract64(s
->eventq
.base
, 0, 32);
243 case A_EVENTQ_BASE
+ 4: /* 64b */
244 *data
= extract64(s
->eventq
.base
, 32, 32);
247 *data
= s
->eventq
.prod
;
250 *data
= s
->eventq
.cons
;
254 qemu_log_mask(LOG_UNIMP
,
255 "%s unhandled 32-bit access at 0x%"PRIx64
" (RAZ)\n",
261 static MemTxResult
smmu_read_mmio(void *opaque
, hwaddr offset
, uint64_t *data
,
262 unsigned size
, MemTxAttrs attrs
)
264 SMMUState
*sys
= opaque
;
265 SMMUv3State
*s
= ARM_SMMUV3(sys
);
268 /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
273 r
= smmu_readll(s
, offset
, data
, attrs
);
276 r
= smmu_readl(s
, offset
, data
, attrs
);
283 trace_smmuv3_read_mmio(offset
, *data
, size
, r
);
287 static const MemoryRegionOps smmu_mem_ops
= {
288 .read_with_attrs
= smmu_read_mmio
,
289 .write_with_attrs
= smmu_write_mmio
,
290 .endianness
= DEVICE_LITTLE_ENDIAN
,
292 .min_access_size
= 4,
293 .max_access_size
= 8,
296 .min_access_size
= 4,
297 .max_access_size
= 8,
301 static void smmu_init_irq(SMMUv3State
*s
, SysBusDevice
*dev
)
305 for (i
= 0; i
< ARRAY_SIZE(s
->irq
); i
++) {
306 sysbus_init_irq(dev
, &s
->irq
[i
]);
310 static void smmu_reset(DeviceState
*dev
)
312 SMMUv3State
*s
= ARM_SMMUV3(dev
);
313 SMMUv3Class
*c
= ARM_SMMUV3_GET_CLASS(s
);
315 c
->parent_reset(dev
);
320 static void smmu_realize(DeviceState
*d
, Error
**errp
)
322 SMMUState
*sys
= ARM_SMMU(d
);
323 SMMUv3State
*s
= ARM_SMMUV3(sys
);
324 SMMUv3Class
*c
= ARM_SMMUV3_GET_CLASS(s
);
325 SysBusDevice
*dev
= SYS_BUS_DEVICE(d
);
326 Error
*local_err
= NULL
;
328 c
->parent_realize(d
, &local_err
);
330 error_propagate(errp
, local_err
);
334 memory_region_init_io(&sys
->iomem
, OBJECT(s
),
335 &smmu_mem_ops
, sys
, TYPE_ARM_SMMUV3
, 0x20000);
337 sys
->mrtypename
= TYPE_SMMUV3_IOMMU_MEMORY_REGION
;
339 sysbus_init_mmio(dev
, &sys
->iomem
);
341 smmu_init_irq(s
, dev
);
344 static const VMStateDescription vmstate_smmuv3_queue
= {
345 .name
= "smmuv3_queue",
347 .minimum_version_id
= 1,
348 .fields
= (VMStateField
[]) {
349 VMSTATE_UINT64(base
, SMMUQueue
),
350 VMSTATE_UINT32(prod
, SMMUQueue
),
351 VMSTATE_UINT32(cons
, SMMUQueue
),
352 VMSTATE_UINT8(log2size
, SMMUQueue
),
356 static const VMStateDescription vmstate_smmuv3
= {
359 .minimum_version_id
= 1,
360 .fields
= (VMStateField
[]) {
361 VMSTATE_UINT32(features
, SMMUv3State
),
362 VMSTATE_UINT8(sid_size
, SMMUv3State
),
363 VMSTATE_UINT8(sid_split
, SMMUv3State
),
365 VMSTATE_UINT32_ARRAY(cr
, SMMUv3State
, 3),
366 VMSTATE_UINT32(cr0ack
, SMMUv3State
),
367 VMSTATE_UINT32(statusr
, SMMUv3State
),
368 VMSTATE_UINT32(irq_ctrl
, SMMUv3State
),
369 VMSTATE_UINT32(gerror
, SMMUv3State
),
370 VMSTATE_UINT32(gerrorn
, SMMUv3State
),
371 VMSTATE_UINT64(gerror_irq_cfg0
, SMMUv3State
),
372 VMSTATE_UINT32(gerror_irq_cfg1
, SMMUv3State
),
373 VMSTATE_UINT32(gerror_irq_cfg2
, SMMUv3State
),
374 VMSTATE_UINT64(strtab_base
, SMMUv3State
),
375 VMSTATE_UINT32(strtab_base_cfg
, SMMUv3State
),
376 VMSTATE_UINT64(eventq_irq_cfg0
, SMMUv3State
),
377 VMSTATE_UINT32(eventq_irq_cfg1
, SMMUv3State
),
378 VMSTATE_UINT32(eventq_irq_cfg2
, SMMUv3State
),
380 VMSTATE_STRUCT(cmdq
, SMMUv3State
, 0, vmstate_smmuv3_queue
, SMMUQueue
),
381 VMSTATE_STRUCT(eventq
, SMMUv3State
, 0, vmstate_smmuv3_queue
, SMMUQueue
),
383 VMSTATE_END_OF_LIST(),
387 static void smmuv3_instance_init(Object
*obj
)
389 /* Nothing much to do here as of now */
392 static void smmuv3_class_init(ObjectClass
*klass
, void *data
)
394 DeviceClass
*dc
= DEVICE_CLASS(klass
);
395 SMMUv3Class
*c
= ARM_SMMUV3_CLASS(klass
);
397 dc
->vmsd
= &vmstate_smmuv3
;
398 device_class_set_parent_reset(dc
, smmu_reset
, &c
->parent_reset
);
399 c
->parent_realize
= dc
->realize
;
400 dc
->realize
= smmu_realize
;
403 static void smmuv3_iommu_memory_region_class_init(ObjectClass
*klass
,
408 static const TypeInfo smmuv3_type_info
= {
409 .name
= TYPE_ARM_SMMUV3
,
410 .parent
= TYPE_ARM_SMMU
,
411 .instance_size
= sizeof(SMMUv3State
),
412 .instance_init
= smmuv3_instance_init
,
413 .class_size
= sizeof(SMMUv3Class
),
414 .class_init
= smmuv3_class_init
,
417 static const TypeInfo smmuv3_iommu_memory_region_info
= {
418 .parent
= TYPE_IOMMU_MEMORY_REGION
,
419 .name
= TYPE_SMMUV3_IOMMU_MEMORY_REGION
,
420 .class_init
= smmuv3_iommu_memory_region_class_init
,
423 static void smmuv3_register_types(void)
425 type_register(&smmuv3_type_info
);
426 type_register(&smmuv3_iommu_memory_region_info
);
429 type_init(smmuv3_register_types
)