/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "hw/registerfields.h"
#include "hw/misc/tz-mpc.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)
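
/* PIDR/CIDR ID register values: each of the read-only ID registers reads
 * back as one byte from this table (see the A_PIDR4..A_CIDR3 cases in
 * tz_mpc_reg_read()).
 */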
static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};
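
/* Drive the outbound "irq" line: it is asserted while a blocked-transaction
 * interrupt is both pending (INT_STAT) and enabled (INT_EN).
 */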
static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
    IOMMUTLBEntry entry = {
        .addr_mask = s->blocksize - 1,
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }
        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        entry.iova = addr;
        entry.translated_addr = addr;

        entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);

        entry.perm = IOMMU_RW;
        if (block_is_ns) {
            entry.target_as = &s->blocked_io_as;
        } else {
            entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        if (block_is_ns) {
            entry.target_as = &s->downstream_as;
        } else {
            entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
    }
}

static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}

static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in "init in progress state", so this just indicates
         * the block size. s->blocksize == (1 << BLK_CFG + 5), so
         * BLK_CFG == ctz32(s->blocksize) - 5
         */
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * which can special case the "size not 4" case), so just
         * pull the right bytes out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}

static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}
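
/* MemoryRegionOps for the 0x1000-byte configuration register block;
 * this is exposed as the first sysbus MMIO region in tz_mpc_realize().
 */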
static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);

    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */
        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like
     * rom_reset() for initial image load, and we want
     * those to pass through the from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}

static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);
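
    /* CTRL reset value 0x100 sets AUTOINC with SEC_RESP and LOCKDOWN clear;
     * a LUT of all zeroes means every block passes only secure transactions.
     */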
    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}

static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable but will need it to be at least as big as the
     * target page size so we can execute out of the resulting MRs. Guest
     * software is supposed to check the block size using the BLK_CFG
     * register, so make it fixed at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
     * words, each bit of which indicates one block.
     */
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}

static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}

static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    dc->reset = tz_mpc_reset;
    dc->props = tz_mpc_properties;
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);