/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"
static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xffffffffU;
}
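
/*
 * For example (values purely illustrative): with security enabled
 * (GICD_CTLR.DS == 0) and GICR_IGROUPR0 == 0x0000ff00, a non-secure
 * write of 0xffffffff to a set/clear-bitmap register only affects
 * interrupts 8..15 (the Group 1 NS ones), while a secure access gets
 * the full 0xffffffff mask.
 */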
static int gicr_ns_access(GICv3CPUState *cs, int irq)
{
    /* Return the 2 bit NSACR.NS_access field for this SGI */
    assert(irq < 16);
    return extract32(cs->gicr_nsacr, irq * 2, 2);
}
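
/*
 * The sixteen 2-bit fields pack one per SGI, so for irq == 3 this
 * reads bits [7:6]. As used in gicv3_redist_send_sgi() below, a
 * generated NS SGI needs a field value of at least 1 to target a
 * Group 0 interrupt and at least 2 to target a Secure Group 1 one.
 */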
static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}
static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}
static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    /* Helper routine to implement reading a "set/clear-bitmap" register */
    reg &= mask_group(cs, attrs);
    return reg;
}
static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr)
{
    /*
     * Return true if a vCPU is resident, which is defined by
     * whether the GICR_VPENDBASER register is marked VALID and
     * has the right virtual pending table address.
     */
    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        return false;
    }
    return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK);
}
/**
 * update_for_one_lpi: Update pending information if this LPI is better
 *
 * @cs: GICv3CPUState
 * @irq: interrupt to look up in the LPI Configuration table
 * @ctbase: physical address of the LPI Configuration table to use
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to update
 *
 * Look up @irq in the Configuration table specified by @ctbase
 * to see if it is enabled and what its priority is. If it is an
 * enabled interrupt with a higher priority than that currently
 * recorded in @hpp, update @hpp.
 */
static void update_for_one_lpi(GICv3CPUState *cs, int irq,
                               uint64_t ctbase, bool ds, PendingIrq *hpp)
{
    uint8_t lpite;
    uint8_t prio;

    address_space_read(&cs->gic->dma_as,
                       ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)),
                       MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite));

    if (!(lpite & LPI_CTE_ENABLED)) {
        return;
    }

    if (ds) {
        prio = lpite & LPI_PRIORITY_MASK;
    } else {
        prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80;
    }

    if ((prio < hpp->prio) ||
        ((prio == hpp->prio) && (irq <= hpp->irq))) {
        hpp->irq = irq;
        hpp->prio = prio;
        /* LPIs and vLPIs are always non-secure Grp1 interrupts */
        hpp->grp = GICV3_G1NS;
    }
}
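
/*
 * Worked example (values illustrative): a config table byte of 0x49
 * has LPI_CTE_ENABLED (bit 0) set and priority bits 0x48. With
 * GICD_CTLR.DS set (@ds true) the priority is used as-is, 0x48;
 * otherwise it is presented as a non-secure priority,
 * (0x48 >> 1) | 0x80 == 0xa4.
 */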
/**
 * update_for_all_lpis: Fully scan LPI tables and find best pending LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @ctbase: physical address of LPI Configuration table
 * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1
 * @ds: true if priority value should not be shifted
 * @hpp: points to pending information to set
 *
 * Recalculate the highest priority pending enabled LPI from scratch,
 * and set @hpp accordingly.
 *
 * We scan the LPI pending table @ptbase; for each pending LPI, we read the
 * corresponding entry in the LPI configuration table @ctbase to extract
 * the priority and enabled information.
 *
 * We take @ptsizebits in the form idbits-1 because this is the way that
 * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits
 * and in the VMAPP command's VPT_size field.
 */
static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
                                uint64_t ctbase, unsigned ptsizebits,
                                bool ds, PendingIrq *hpp)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint8_t pend;
    uint32_t pendt_size = (1ULL << (ptsizebits + 1));
    int i, bit;

    hpp->prio = 0xff;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
        while (pend) {
            bit = ctz32(pend);
            update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp);
            pend &= ~(1 << bit);
        }
    }
}
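
/*
 * For instance (illustrative): GICR_PROPBASER.IDBits == 15 describes
 * 2^16 interrupt IDs, so @ptsizebits == 15 gives pendt_size == 65536
 * bits, i.e. an 8KB pending table. The scan starts at byte
 * GICV3_LPI_INTID_START / 8 == 1024 because INTIDs below 8192 are not
 * LPIs and have no bits in the table.
 */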
/**
 * set_pending_table_bit: Set or clear pending bit for an LPI
 *
 * @cs: GICv3CPUState
 * @ptbase: physical address of LPI Pending table
 * @irq: LPI to change pending state for
 * @level: false to clear pending state, true to set
 *
 * Returns true if we needed to do something, false if the pending bit
 * was already at @level.
 */
static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase,
                                  int irq, bool level)
{
    AddressSpace *as = &cs->gic->dma_as;
    uint64_t addr = ptbase + irq / 8;
    uint8_t pend;

    address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    if (extract32(pend, irq % 8, 1) == level) {
        /* Bit already at requested state, no action required */
        return false;
    }
    pend = deposit32(pend, irq % 8, 1, level ? 1 : 0);
    address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1);
    return true;
}
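
/*
 * Example (illustrative): for irq == 8195 the pending bit lives at
 * byte ptbase + 8195 / 8 == ptbase + 1024, bit 8195 % 8 == 3, so a
 * read-modify-write of that single guest memory byte is sufficient.
 */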
static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}
static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}
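
/*
 * Round-trip example of the non-secure view (values illustrative):
 * an NS write of 0x40 is stored as 0x80 | (0x40 >> 1) == 0xa0; an NS
 * read of that stored priority returns (0xa0 << 1) & 0xff == 0x40.
 * The guest sees its own value back while the secure half of the
 * priority range stays unreachable from NS.
 */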
static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
{
    uint64_t ptbase, ctbase, idbits;

    if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
        cs->hppvlpi.prio = 0xff;
        return;
    }

    ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK;
    ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
    idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);

    update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi);
}
static void gicv3_redist_update_vlpi(GICv3CPUState *cs)
{
    gicv3_redist_update_vlpi_only(cs);
    gicv3_cpuif_virt_irq_fiq_update(cs);
}
static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval)
{
    /* Write @newval to GICR_VPENDBASER, handling its effects */
    bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID);
    bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID);
    bool pendinglast;

    /*
     * The DIRTY bit is read-only and for us is always zero;
     * other fields are writable.
     */
    newval &= R_GICR_VPENDBASER_INNERCACHE_MASK |
        R_GICR_VPENDBASER_SHAREABILITY_MASK |
        R_GICR_VPENDBASER_PHYADDR_MASK |
        R_GICR_VPENDBASER_OUTERCACHE_MASK |
        R_GICR_VPENDBASER_PENDINGLAST_MASK |
        R_GICR_VPENDBASER_IDAI_MASK |
        R_GICR_VPENDBASER_VALID_MASK;

    if (oldvalid && newvalid) {
        /*
         * Changing other fields while VALID is 1 is UNPREDICTABLE;
         * we choose to log and ignore the write.
         */
        if (cs->gicr_vpendbaser ^ newval) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Changing GICR_VPENDBASER when VALID=1 "
                          "is UNPREDICTABLE\n", __func__);
        }
        return;
    }
    if (!oldvalid && !newvalid) {
        cs->gicr_vpendbaser = newval;
        return;
    }

    if (newvalid) {
        /*
         * Valid going from 0 to 1: update hppvlpi from tables.
         * If IDAI is 0 we are allowed to use the info we cached in
         * the IMPDEF area of the table.
         * PendingLast is RES1 when we make this transition.
         */
        pendinglast = true;
    } else {
        /*
         * Valid going from 1 to 0:
         * Set PendingLast if there was a pending enabled interrupt
         * for the vPE that was just descheduled.
         * If we cache info in the IMPDEF area, write it out here.
         */
        pendinglast = cs->hppvlpi.prio != 0xff;
    }

    newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast);
    cs->gicr_vpendbaser = newval;
    gicv3_redist_update_vlpi(cs);
}
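
/*
 * Summary of the VALID transitions handled above:
 *   0 -> 0 : plain register update
 *   1 -> 1 : UNPREDICTABLE if other fields change; we log and ignore
 *   0 -> 1 : vPE scheduled; PendingLast is RES1 and hppvlpi is
 *            recalculated from the virtual pending/config tables
 *   1 -> 0 : vPE descheduled; PendingLast records whether a pending
 *            enabled vLPI was outstanding for it
 */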
static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = extract64(cs->gicr_vpropbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        *data = extract64(cs->gicr_vpropbaser, 32, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = extract64(cs->gicr_vpendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        *data = extract64(cs->gicr_vpendbaser, 32, 32);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
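
/*
 * A worked example of the ICFGR packing (values illustrative): if
 * only PPI 17 is edge-triggered, edge_trigger == 0x00020000, so the
 * GICR_ICFGR1 read above computes half_shuffle32(0x0002) << 1 == 0x8:
 * the two-bit field for interrupt 17 (bits [3:2]) reads as 0b10,
 * meaning edge-triggered. gicr_writel() below applies the inverse
 * transformation with half_unshuffle32().
 */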
static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. GICR_TYPER.LPIS is 1 (we
         * implement LPIs) so Enable_LPIs is programmable.
         */
        if (cs->gicr_typer & GICR_TYPER_PLPIS) {
            if (value & GICR_CTLR_ENABLE_LPIS) {
                cs->gicr_ctlr |= GICR_CTLR_ENABLE_LPIS;
                /* Check for any pending interrupts in pending table */
                gicv3_redist_update_lpi(cs);
            } else {
                cs->gicr_ctlr &= ~GICR_CTLR_ENABLE_LPIS;
                /* cs->hppi might have been an LPI; recalculate */
                gicv3_redist_update(cs);
            }
        }
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_VPROPBASER + 4:
        cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value));
        return MEMTX_OK;
    case GICR_VPENDBASER + 4:
        gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value));
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        *data = cs->gicr_vpropbaser;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        *data = cs->gicr_vpendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
        /*
         * VLPI frame registers. We don't need a version check for
         * VPROPBASER and VPENDBASER because gicv3_redist_size() will
         * prevent pre-v4 GIC from passing us offsets this high.
         */
    case GICR_VPROPBASER:
        cs->gicr_vpropbaser = value;
        return MEMTX_OK;
    case GICR_VPENDBASER:
        gicr_write_vpendbaser(cs, value);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}
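
/*
 * Addressing example (values illustrative): on a GICv3,
 * gicv3_redist_size() is two 64KB frames == 0x20000 per CPU. A read
 * at offset 0x30000 into a region with cpuidx == 4 therefore targets
 * &s->cpu[5] at redistributor-relative offset 0x10000, i.e. the
 * SGI_base frame of the sixth CPU's redistributor.
 */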
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3RedistRegion *region = opaque;
    GICv3State *s = region->gic;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    assert((offset & (size - 1)) == 0);

    /*
     * There are (for GICv3) two 64K redistributor pages per CPU.
     * In some cases the redistributor pages for all CPUs are not
     * contiguous (eg on the virt board they are split into two
     * parts if there are too many CPUs to all fit in the same place
     * in the memory map); if so then the GIC has multiple MemoryRegions
     * for the redistributors.
     */
    cpuidx = region->cpuidx + offset / gicv3_redist_size(s);
    offset %= gicv3_redist_size(s);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}
static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq)
{
    uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_one_lpi(cs, irq, lpict_baddr,
                       cs->gic->gicd_ctlr & GICD_CTLR_DS,
                       &cs->hpplpi);
}
void gicv3_redist_update_lpi_only(GICv3CPUState *cs)
{
    /*
     * This function scans the LPI pending table and for each pending
     * LPI, reads the corresponding entry from LPI configuration table
     * to extract the priority info and determine if the current LPI
     * priority is lower than the last computed high priority lpi interrupt.
     * If yes, replace current LPI as the new high priority lpi interrupt.
     */
    uint64_t lpipt_baddr, lpict_baddr;
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK;

    update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits,
                        cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi);
}
void gicv3_redist_update_lpi(GICv3CPUState *cs)
{
    gicv3_redist_update_lpi_only(cs);
    gicv3_redist_update(cs);
}
void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * This function updates the pending bit in lpi pending table for
     * the irq being activated or deactivated.
     */
    uint64_t lpipt_baddr;

    lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) {
        /* no change in the value of pending bit, return */
        return;
    }
    if (level) {
        /*
         * check if this LPI is better than the current hpplpi, if yes
         * just set hpplpi.prio and .irq without doing a full rescan
         */
        gicv3_redist_check_lpi_priority(cs, irq);
        gicv3_redist_update(cs);
    } else {
        if (irq == cs->hpplpi.irq) {
            gicv3_redist_update_lpi(cs);
        }
    }
}
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level)
{
    uint64_t idbits;

    idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);

    if (!(cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        (irq > (1ULL << (idbits + 1)) - 1) || irq < GICV3_LPI_INTID_START) {
        return;
    }

    /* set/clear the pending bit for this irq */
    gicv3_redist_lpi_pending(cs, irq, level);
}
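
/*
 * Range-check example (illustrative): with GICR_PROPBASER.IDBits == 15
 * and GICD_TYPER_IDBITS == 0xf, valid LPI INTIDs run from
 * GICV3_LPI_INTID_START (8192) to (1 << 16) - 1 == 65535; anything
 * outside that window is silently ignored, as is everything while
 * GICR_CTLR.EnableLPIs is 0.
 */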
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq)
{
    /*
     * The only cached information for LPIs we have is the HPPLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_update_lpi(cs);
}
void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq)
{
    /*
     * Move the specified LPI's pending state from the source redistributor
     * to the destination.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    if ((irq / 8) >= pendt_size) {
        return;
    }

    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    if (!set_pending_table_bit(src, src_baddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (irq == src->hpplpi.irq) {
        /*
         * We just made this LPI not-pending so only need to update
         * if it was previously the highest priority pending LPI
         */
        gicv3_redist_update_lpi(src);
    }
    /* Mark it pending on the destination */
    gicv3_redist_lpi_pending(dest, irq, 1);
}
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest)
{
    /*
     * We must move all pending LPIs from the source redistributor
     * to the destination. That is, for every pending LPI X on
     * src, we must set it not-pending on src and pending on dest.
     * LPIs that are already pending on dest are not cleared.
     *
     * If LPIs are disabled on dest this is CONSTRAINED UNPREDICTABLE:
     * we choose to NOP. If LPIs are disabled on source there's nothing
     * to be transferred anyway.
     */
    AddressSpace *as = &src->gic->dma_as;
    uint64_t idbits;
    uint32_t pendt_size;
    uint64_t src_baddr, dest_baddr;
    int i;

    if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) ||
        !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        return;
    }

    idbits = MIN(FIELD_EX64(src->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 GICD_TYPER_IDBITS);
    idbits = MIN(FIELD_EX64(dest->gicr_propbaser, GICR_PROPBASER, IDBITS),
                 idbits);

    pendt_size = 1ULL << (idbits + 1);
    src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;
    dest_baddr = dest->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK;

    for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
        uint8_t src_pend, dest_pend;

        address_space_read(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &src_pend, sizeof(src_pend));
        if (!src_pend) {
            continue;
        }
        address_space_read(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                           &dest_pend, sizeof(dest_pend));
        dest_pend |= src_pend;
        src_pend = 0;
        address_space_write(as, src_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &src_pend, sizeof(src_pend));
        address_space_write(as, dest_baddr + i, MEMTXATTRS_UNSPECIFIED,
                            &dest_pend, sizeof(dest_pend));
    }

    gicv3_redist_update_lpi(src);
    gicv3_redist_update_lpi(dest);
}
void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level)
{
    /*
     * Change the pending state of the specified vLPI.
     * Unlike gicv3_redist_process_vlpi(), we know here that the
     * vCPU is definitely resident on this redistributor, and that
     * the irq is in range.
     */
    uint64_t vptbase, ctbase;

    vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16;

    if (set_pending_table_bit(cs, vptbase, irq, level)) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }
}
void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level)
{
    bool bit_changed;
    bool resident = vcpu_resident(cs, vptaddr);
    uint64_t ctbase;

    if (resident) {
        uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS);
        if (irq >= (1ULL << (idbits + 1))) {
            return;
        }
    }

    bit_changed = set_pending_table_bit(cs, vptaddr, irq, level);
    if (resident && bit_changed) {
        if (level) {
            /* Check whether this vLPI is now the best */
            ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK;
            update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi);
            gicv3_cpuif_virt_irq_fiq_update(cs);
        } else {
            /* Only need to recalculate if this was previously the best vLPI */
            if (irq == cs->hppvlpi.irq) {
                gicv3_redist_update_vlpi(cs);
            }
        }
    }

    if (!resident && level && doorbell != INTID_SPURIOUS &&
        (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
        /* vCPU is not currently resident: ring the doorbell */
        gicv3_redist_process_lpi(cs, doorbell, 1);
    }
}
void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell)
{
    /*
     * Move the specified vLPI's pending state from the source redistributor
     * to the destination.
     */
    if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) {
        /* Not pending on source, nothing to do */
        return;
    }
    if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) {
        /*
         * Update src's cached highest-priority pending vLPI if we just made
         * it not-pending
         */
        gicv3_redist_update_vlpi(src);
    }
    /*
     * Mark the vLPI pending on the destination (ringing the doorbell
     * if the vCPU isn't resident)
     */
    gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1);
}
void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr)
{
    if (!vcpu_resident(cs, vptaddr)) {
        /* We don't have anything cached if the vCPU isn't resident */
        return;
    }

    /* Otherwise, our only cached information is the HPPVLPI info */
    gicv3_redist_update_vlpi(cs);
}
void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr)
{
    /*
     * The only cached information for vLPIs we have is the HPPVLPI.
     * We could be cleverer about identifying when we don't need
     * to do a full rescan of the pending table, but until we find
     * this is a performance issue, just always recalculate.
     */
    gicv3_redist_vinvall(cs, vptaddr);
}
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
{
    /* Update redistributor state for a change in an external PPI input line */
    if (level == extract32(cs->level, irq, 1)) {
        return;
    }

    trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);

    cs->level = deposit32(cs->level, irq, 1, level);

    if (level) {
        /* 0->1 edges latch the pending bit for edge-triggered interrupts */
        if (extract32(cs->edge_trigger, irq, 1)) {
            cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
        }
    }

    gicv3_redist_update(cs);
}
void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
{
    /* Update redistributor state for a generated SGI */
    int irqgrp = gicv3_irq_group(cs->gic, cs, irq);

    /* If we are asked for a Secure Group 1 SGI and it's actually
     * configured as Secure Group 0 this is OK (subject to the usual
     * NSACR checks).
     */
    if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
        grp = GICV3_G0;
    }

    if (grp != irqgrp) {
        return;
    }

    if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* If security is enabled we must test the NSACR bits */
        int nsaccess = gicr_ns_access(cs, irq);

        if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
            (irqgrp == GICV3_G1 && nsaccess < 2)) {
            return;
        }
    }

    /* OK, we can accept the SGI */
    trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
    cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
    gicv3_redist_update(cs);
}