/*
 * hw/intc/arm_gicv3_redist.c
 * (imported via web viewer — commit "hw/intc/arm_gicv3: Implement CPU i/f
 * SGI generation registers", repo qemu/kevin.git,
 * blob 55c25e8935314dd0a665df76297c72e8735c4068)
 */
/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "gicv3_internal.h"
16 static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
18 /* Return a 32-bit mask which should be applied for this set of 32
19 * interrupts; each bit is 1 if access is permitted by the
20 * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
21 * not affect config register accesses, unlike GICD_NSACR.)
23 if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
24 /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
25 return cs->gicr_igroupr0;
27 return 0xFFFFFFFFU;
30 static int gicr_ns_access(GICv3CPUState *cs, int irq)
32 /* Return the 2 bit NSACR.NS_access field for this SGI */
33 assert(irq < 16);
34 return extract32(cs->gicr_nsacr, irq * 2, 2);
37 static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
38 uint32_t *reg, uint32_t val)
40 /* Helper routine to implement writing to a "set-bitmap" register */
41 val &= mask_group(cs, attrs);
42 *reg |= val;
43 gicv3_redist_update(cs);
46 static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
47 uint32_t *reg, uint32_t val)
49 /* Helper routine to implement writing to a "clear-bitmap" register */
50 val &= mask_group(cs, attrs);
51 *reg &= ~val;
52 gicv3_redist_update(cs);
55 static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
56 uint32_t reg)
58 reg &= mask_group(cs, attrs);
59 return reg;
62 static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
63 int irq)
65 /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
66 * honouring security state (these are RAZ/WI for Group 0 or Secure
67 * Group 1 interrupts).
69 uint32_t prio;
71 prio = cs->gicr_ipriorityr[irq];
73 if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
74 if (!(cs->gicr_igroupr0 & (1U << irq))) {
75 /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
76 return 0;
78 /* NS view of the interrupt priority */
79 prio = (prio << 1) & 0xff;
81 return prio;
84 static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
85 uint8_t value)
87 /* Write the value of GICD_IPRIORITYR<n> for the specified interrupt,
88 * honouring security state (these are RAZ/WI for Group 0 or Secure
89 * Group 1 interrupts).
91 if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
92 if (!(cs->gicr_igroupr0 & (1U << irq))) {
93 /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
94 return;
96 /* NS view of the interrupt priority */
97 value = 0x80 | (value >> 1);
99 cs->gicr_ipriorityr[irq] = value;
102 static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
103 uint64_t *data, MemTxAttrs attrs)
105 switch (offset) {
106 case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
107 *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
108 return MEMTX_OK;
109 default:
110 return MEMTX_ERROR;
114 static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
115 uint64_t value, MemTxAttrs attrs)
117 switch (offset) {
118 case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
119 gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
120 gicv3_redist_update(cs);
121 return MEMTX_OK;
122 default:
123 return MEMTX_ERROR;
127 static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
128 uint64_t *data, MemTxAttrs attrs)
130 switch (offset) {
131 case GICR_CTLR:
132 *data = cs->gicr_ctlr;
133 return MEMTX_OK;
134 case GICR_IIDR:
135 *data = gicv3_iidr();
136 return MEMTX_OK;
137 case GICR_TYPER:
138 *data = extract64(cs->gicr_typer, 0, 32);
139 return MEMTX_OK;
140 case GICR_TYPER + 4:
141 *data = extract64(cs->gicr_typer, 32, 32);
142 return MEMTX_OK;
143 case GICR_STATUSR:
144 /* RAZ/WI for us (this is an optional register and our implementation
145 * does not track RO/WO/reserved violations to report them to the guest)
147 *data = 0;
148 return MEMTX_OK;
149 case GICR_WAKER:
150 *data = cs->gicr_waker;
151 return MEMTX_OK;
152 case GICR_PROPBASER:
153 *data = extract64(cs->gicr_propbaser, 0, 32);
154 return MEMTX_OK;
155 case GICR_PROPBASER + 4:
156 *data = extract64(cs->gicr_propbaser, 32, 32);
157 return MEMTX_OK;
158 case GICR_PENDBASER:
159 *data = extract64(cs->gicr_pendbaser, 0, 32);
160 return MEMTX_OK;
161 case GICR_PENDBASER + 4:
162 *data = extract64(cs->gicr_pendbaser, 32, 32);
163 return MEMTX_OK;
164 case GICR_IGROUPR0:
165 if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
166 *data = 0;
167 return MEMTX_OK;
169 *data = cs->gicr_igroupr0;
170 return MEMTX_OK;
171 case GICR_ISENABLER0:
172 case GICR_ICENABLER0:
173 *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
174 return MEMTX_OK;
175 case GICR_ISPENDR0:
176 case GICR_ICPENDR0:
178 /* The pending register reads as the logical OR of the pending
179 * latch and the input line level for level-triggered interrupts.
181 uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
182 *data = gicr_read_bitmap_reg(cs, attrs, val);
183 return MEMTX_OK;
185 case GICR_ISACTIVER0:
186 case GICR_ICACTIVER0:
187 *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
188 return MEMTX_OK;
189 case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
191 int i, irq = offset - GICR_IPRIORITYR;
192 uint32_t value = 0;
194 for (i = irq + 3; i >= irq; i--, value <<= 8) {
195 value |= gicr_read_ipriorityr(cs, attrs, i);
197 *data = value;
198 return MEMTX_OK;
200 case GICR_ICFGR0:
201 case GICR_ICFGR1:
203 /* Our edge_trigger bitmap is one bit per irq; take the correct
204 * half of it, and spread it out into the odd bits.
206 uint32_t value;
208 value = cs->edge_trigger & mask_group(cs, attrs);
209 value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
210 value = half_shuffle32(value) << 1;
211 *data = value;
212 return MEMTX_OK;
214 case GICR_IGRPMODR0:
215 if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
216 /* RAZ/WI if security disabled, or if
217 * security enabled and this is an NS access
219 *data = 0;
220 return MEMTX_OK;
222 *data = cs->gicr_igrpmodr0;
223 return MEMTX_OK;
224 case GICR_NSACR:
225 if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
226 /* RAZ/WI if security disabled, or if
227 * security enabled and this is an NS access
229 *data = 0;
230 return MEMTX_OK;
232 *data = cs->gicr_nsacr;
233 return MEMTX_OK;
234 case GICR_IDREGS ... GICR_IDREGS + 0x1f:
235 *data = gicv3_idreg(offset - GICR_IDREGS);
236 return MEMTX_OK;
237 default:
238 return MEMTX_ERROR;
242 static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
243 uint64_t value, MemTxAttrs attrs)
245 switch (offset) {
246 case GICR_CTLR:
247 /* For our implementation, GICR_TYPER.DPGS is 0 and so all
248 * the DPG bits are RAZ/WI. We don't do anything asynchronously,
249 * so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't
250 * implement LPIs) so Enable_LPIs is RES0. So there are no writable
251 * bits for us.
253 return MEMTX_OK;
254 case GICR_STATUSR:
255 /* RAZ/WI for our implementation */
256 return MEMTX_OK;
257 case GICR_WAKER:
258 /* Only the ProcessorSleep bit is writeable. When the guest sets
259 * it it requests that we transition the channel between the
260 * redistributor and the cpu interface to quiescent, and that
261 * we set the ChildrenAsleep bit once the inteface has reached the
262 * quiescent state.
263 * Setting the ProcessorSleep to 0 reverses the quiescing, and
264 * ChildrenAsleep is cleared once the transition is complete.
265 * Since our interface is not asynchronous, we complete these
266 * transitions instantaneously, so we set ChildrenAsleep to the
267 * same value as ProcessorSleep here.
269 value &= GICR_WAKER_ProcessorSleep;
270 if (value & GICR_WAKER_ProcessorSleep) {
271 value |= GICR_WAKER_ChildrenAsleep;
273 cs->gicr_waker = value;
274 return MEMTX_OK;
275 case GICR_PROPBASER:
276 cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
277 return MEMTX_OK;
278 case GICR_PROPBASER + 4:
279 cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
280 return MEMTX_OK;
281 case GICR_PENDBASER:
282 cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
283 return MEMTX_OK;
284 case GICR_PENDBASER + 4:
285 cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
286 return MEMTX_OK;
287 case GICR_IGROUPR0:
288 if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
289 return MEMTX_OK;
291 cs->gicr_igroupr0 = value;
292 gicv3_redist_update(cs);
293 return MEMTX_OK;
294 case GICR_ISENABLER0:
295 gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
296 return MEMTX_OK;
297 case GICR_ICENABLER0:
298 gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
299 return MEMTX_OK;
300 case GICR_ISPENDR0:
301 gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
302 return MEMTX_OK;
303 case GICR_ICPENDR0:
304 gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
305 return MEMTX_OK;
306 case GICR_ISACTIVER0:
307 gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
308 return MEMTX_OK;
309 case GICR_ICACTIVER0:
310 gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
311 return MEMTX_OK;
312 case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
314 int i, irq = offset - GICR_IPRIORITYR;
316 for (i = irq; i < irq + 4; i++, value >>= 8) {
317 gicr_write_ipriorityr(cs, attrs, i, value);
319 gicv3_redist_update(cs);
320 return MEMTX_OK;
322 case GICR_ICFGR0:
323 /* Register is all RAZ/WI or RAO/WI bits */
324 return MEMTX_OK;
325 case GICR_ICFGR1:
327 uint32_t mask;
329 /* Since our edge_trigger bitmap is one bit per irq, our input
330 * 32-bits will compress down into 16 bits which we need
331 * to write into the bitmap.
333 value = half_unshuffle32(value >> 1) << 16;
334 mask = mask_group(cs, attrs) & 0xffff0000U;
336 cs->edge_trigger &= ~mask;
337 cs->edge_trigger |= (value & mask);
339 gicv3_redist_update(cs);
340 return MEMTX_OK;
342 case GICR_IGRPMODR0:
343 if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
344 /* RAZ/WI if security disabled, or if
345 * security enabled and this is an NS access
347 return MEMTX_OK;
349 cs->gicr_igrpmodr0 = value;
350 gicv3_redist_update(cs);
351 return MEMTX_OK;
352 case GICR_NSACR:
353 if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
354 /* RAZ/WI if security disabled, or if
355 * security enabled and this is an NS access
357 return MEMTX_OK;
359 cs->gicr_nsacr = value;
360 /* no update required as this only affects access permission checks */
361 return MEMTX_OK;
362 case GICR_IIDR:
363 case GICR_TYPER:
364 case GICR_IDREGS ... GICR_IDREGS + 0x1f:
365 /* RO registers, ignore the write */
366 qemu_log_mask(LOG_GUEST_ERROR,
367 "%s: invalid guest write to RO register at offset "
368 TARGET_FMT_plx "\n", __func__, offset);
369 return MEMTX_OK;
370 default:
371 return MEMTX_ERROR;
375 static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
376 uint64_t *data, MemTxAttrs attrs)
378 switch (offset) {
379 case GICR_TYPER:
380 *data = cs->gicr_typer;
381 return MEMTX_OK;
382 case GICR_PROPBASER:
383 *data = cs->gicr_propbaser;
384 return MEMTX_OK;
385 case GICR_PENDBASER:
386 *data = cs->gicr_pendbaser;
387 return MEMTX_OK;
388 default:
389 return MEMTX_ERROR;
393 static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
394 uint64_t value, MemTxAttrs attrs)
396 switch (offset) {
397 case GICR_PROPBASER:
398 cs->gicr_propbaser = value;
399 return MEMTX_OK;
400 case GICR_PENDBASER:
401 cs->gicr_pendbaser = value;
402 return MEMTX_OK;
403 case GICR_TYPER:
404 /* RO register, ignore the write */
405 qemu_log_mask(LOG_GUEST_ERROR,
406 "%s: invalid guest write to RO register at offset "
407 TARGET_FMT_plx "\n", __func__, offset);
408 return MEMTX_OK;
409 default:
410 return MEMTX_ERROR;
414 MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
415 unsigned size, MemTxAttrs attrs)
417 GICv3State *s = opaque;
418 GICv3CPUState *cs;
419 MemTxResult r;
420 int cpuidx;
422 /* This region covers all the redistributor pages; there are
423 * (for GICv3) two 64K pages per CPU. At the moment they are
424 * all contiguous (ie in this one region), though we might later
425 * want to allow splitting of redistributor pages into several
426 * blocks so we can support more CPUs.
428 cpuidx = offset / 0x20000;
429 offset %= 0x20000;
430 assert(cpuidx < s->num_cpu);
432 cs = &s->cpu[cpuidx];
434 switch (size) {
435 case 1:
436 r = gicr_readb(cs, offset, data, attrs);
437 break;
438 case 4:
439 r = gicr_readl(cs, offset, data, attrs);
440 break;
441 case 8:
442 r = gicr_readll(cs, offset, data, attrs);
443 break;
444 default:
445 r = MEMTX_ERROR;
446 break;
449 if (r == MEMTX_ERROR) {
450 qemu_log_mask(LOG_GUEST_ERROR,
451 "%s: invalid guest read at offset " TARGET_FMT_plx
452 "size %u\n", __func__, offset, size);
453 trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
454 size, attrs.secure);
455 } else {
456 trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
457 size, attrs.secure);
459 return r;
462 MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
463 unsigned size, MemTxAttrs attrs)
465 GICv3State *s = opaque;
466 GICv3CPUState *cs;
467 MemTxResult r;
468 int cpuidx;
470 /* This region covers all the redistributor pages; there are
471 * (for GICv3) two 64K pages per CPU. At the moment they are
472 * all contiguous (ie in this one region), though we might later
473 * want to allow splitting of redistributor pages into several
474 * blocks so we can support more CPUs.
476 cpuidx = offset / 0x20000;
477 offset %= 0x20000;
478 assert(cpuidx < s->num_cpu);
480 cs = &s->cpu[cpuidx];
482 switch (size) {
483 case 1:
484 r = gicr_writeb(cs, offset, data, attrs);
485 break;
486 case 4:
487 r = gicr_writel(cs, offset, data, attrs);
488 break;
489 case 8:
490 r = gicr_writell(cs, offset, data, attrs);
491 break;
492 default:
493 r = MEMTX_ERROR;
494 break;
497 if (r == MEMTX_ERROR) {
498 qemu_log_mask(LOG_GUEST_ERROR,
499 "%s: invalid guest write at offset " TARGET_FMT_plx
500 "size %u\n", __func__, offset, size);
501 trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
502 size, attrs.secure);
503 } else {
504 trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
505 size, attrs.secure);
507 return r;
510 void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
512 /* Update redistributor state for a change in an external PPI input line */
513 if (level == extract32(cs->level, irq, 1)) {
514 return;
517 trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);
519 cs->level = deposit32(cs->level, irq, 1, level);
521 if (level) {
522 /* 0->1 edges latch the pending bit for edge-triggered interrupts */
523 if (extract32(cs->edge_trigger, irq, 1)) {
524 cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
528 gicv3_redist_update(cs);
531 void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
533 /* Update redistributor state for a generated SGI */
534 int irqgrp = gicv3_irq_group(cs->gic, cs, irq);
536 /* If we are asked for a Secure Group 1 SGI and it's actually
537 * configured as Secure Group 0 this is OK (subject to the usual
538 * NSACR checks).
540 if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
541 grp = GICV3_G0;
544 if (grp != irqgrp) {
545 return;
548 if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
549 /* If security is enabled we must test the NSACR bits */
550 int nsaccess = gicr_ns_access(cs, irq);
552 if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
553 (irqgrp == GICV3_G1 && nsaccess < 2)) {
554 return;
558 /* OK, we can accept the SGI */
559 trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
560 cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
561 gicv3_redist_update(cs);