/*
 * ARM GICv3 support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Peter Maydell
 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_ARM_GICV3_INTERNAL_H
#define QEMU_ARM_GICV3_INTERNAL_H

#include "hw/registerfields.h"
#include "hw/intc/arm_gicv3_common.h"

/* Distributor registers, as offsets from the distributor base address */
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ISACTIVER 0x0300
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ITARGETSR 0x0800
#define GICD_ICFGR 0x0C00
#define GICD_IGRPMODR 0x0D00
#define GICD_NSACR 0x0E00
#define GICD_SGIR 0x0F00
#define GICD_CPENDSGIR 0x0F10
#define GICD_SPENDSGIR 0x0F20
#define GICD_IROUTER 0x6000
#define GICD_IDREGS 0xFFD0

/* GICD_CTLR fields */
#define GICD_CTLR_EN_GRP0 (1U << 0)
#define GICD_CTLR_EN_GRP1NS (1U << 1) /* GICv3 5.3.20 */
#define GICD_CTLR_EN_GRP1S (1U << 2)
#define GICD_CTLR_EN_GRP1_ALL (GICD_CTLR_EN_GRP1NS | GICD_CTLR_EN_GRP1S)
/* Bit 4 is ARE if the system doesn't support TrustZone, ARE_S otherwise */
#define GICD_CTLR_ARE (1U << 4)
#define GICD_CTLR_ARE_S (1U << 4)
#define GICD_CTLR_ARE_NS (1U << 5)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_E1NWF (1U << 7)
#define GICD_CTLR_RWP (1U << 31)

#define GICD_TYPER_LPIS_SHIFT 17

#define GICD_TYPER_IDBITS 0xf

/*
 * Redistributor frame offsets from RD_base
 */
#define GICR_SGI_OFFSET 0x10000
#define GICR_VLPI_OFFSET 0x20000

/*
 * Redistributor registers, offsets from RD_base
 */
#define GICR_CTLR 0x0000
#define GICR_IIDR 0x0004
#define GICR_TYPER 0x0008
#define GICR_STATUSR 0x0010
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
#define GICR_IDREGS 0xFFD0

/* SGI and PPI Redistributor registers, offsets from RD_base */
#define GICR_IGROUPR0 (GICR_SGI_OFFSET + 0x0080)
#define GICR_ISENABLER0 (GICR_SGI_OFFSET + 0x0100)
#define GICR_ICENABLER0 (GICR_SGI_OFFSET + 0x0180)
#define GICR_ISPENDR0 (GICR_SGI_OFFSET + 0x0200)
#define GICR_ICPENDR0 (GICR_SGI_OFFSET + 0x0280)
#define GICR_ISACTIVER0 (GICR_SGI_OFFSET + 0x0300)
#define GICR_ICACTIVER0 (GICR_SGI_OFFSET + 0x0380)
#define GICR_IPRIORITYR (GICR_SGI_OFFSET + 0x0400)
#define GICR_ICFGR0 (GICR_SGI_OFFSET + 0x0C00)
#define GICR_ICFGR1 (GICR_SGI_OFFSET + 0x0C04)
#define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00)
#define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00)

/* VLPI redistributor registers, offsets from VLPI_base */
#define GICR_VPROPBASER (GICR_VLPI_OFFSET + 0x70)
#define GICR_VPENDBASER (GICR_VLPI_OFFSET + 0x78)

#define GICR_CTLR_ENABLE_LPIS (1U << 0)
#define GICR_CTLR_CES (1U << 1)
#define GICR_CTLR_RWP (1U << 3)
#define GICR_CTLR_DPG0 (1U << 24)
#define GICR_CTLR_DPG1NS (1U << 25)
#define GICR_CTLR_DPG1S (1U << 26)
#define GICR_CTLR_UWP (1U << 31)

#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_DIRECTLPI (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
#define GICR_TYPER_DPGS (1U << 5)
#define GICR_TYPER_PROCNUM (0xFFFFU << 8)
#define GICR_TYPER_COMMONLPIAFF (0x3 << 24)
#define GICR_TYPER_AFFINITYVALUE (0xFFFFFFFFULL << 32)

#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
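
/*
 * Illustrative sketch (guest-side view, not code from this model): the
 * architecture's wake-up handshake clears GICR_WAKER.ProcessorSleep and then
 * polls until GICR_WAKER.ChildrenAsleep reads as zero. Assuming hypothetical
 * 32-bit accessors gicr_read32()/gicr_write32() for the redistributor frame:
 *
 *     uint32_t waker = gicr_read32(GICR_WAKER);
 *     gicr_write32(GICR_WAKER, waker & ~GICR_WAKER_ProcessorSleep);
 *     while (gicr_read32(GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
 *     }
 */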

FIELD(GICR_PROPBASER, IDBITS, 0, 5)
FIELD(GICR_PROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_PROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PROPBASER, PHYADDR, 12, 40)
FIELD(GICR_PROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_PENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_PENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_PENDBASER, PHYADDR, 16, 36)
FIELD(GICR_PENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_PENDBASER, PTZ, 62, 1)

#define GICR_PROPBASER_IDBITS_THRESHOLD 0xd
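
/*
 * Illustrative sketch only: the FIELD() definitions above pair with the
 * FIELD_EX64()/FIELD_DP64() helpers from "hw/registerfields.h". For example,
 * decoding a GICR_PROPBASER value into the LPI configuration table address
 * and the number of interrupt ID bits it covers could look like:
 *
 *     uint64_t propbaser = cs->gicr_propbaser;
 *     uint64_t table_addr = FIELD_EX64(propbaser, GICR_PROPBASER, PHYADDR) << 12;
 *     unsigned idbits = FIELD_EX64(propbaser, GICR_PROPBASER, IDBITS) + 1;
 *
 * (cs is assumed to be a GICv3CPUState pointer; the shift by 12 mirrors the
 * 4K-aligned placement of the PHYADDR field.)
 */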

/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */
FIELD(GICR_VPROPBASER, IDBITS, 0, 5)
FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPROPBASER, PHYADDR, 12, 40)
FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3)

FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3)
FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2)
FIELD(GICR_VPENDBASER, PHYADDR, 16, 36)
FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3)
FIELD(GICR_VPENDBASER, DIRTY, 60, 1)
FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1)
FIELD(GICR_VPENDBASER, IDAI, 62, 1)
FIELD(GICR_VPENDBASER, VALID, 63, 1)

#define ICC_CTLR_EL1_CBPR (1U << 0)
#define ICC_CTLR_EL1_EOIMODE (1U << 1)
#define ICC_CTLR_EL1_PMHE (1U << 6)
#define ICC_CTLR_EL1_PRIBITS_SHIFT 8
#define ICC_CTLR_EL1_PRIBITS_MASK (7U << ICC_CTLR_EL1_PRIBITS_SHIFT)
#define ICC_CTLR_EL1_IDBITS_SHIFT 11
#define ICC_CTLR_EL1_SEIS (1U << 14)
#define ICC_CTLR_EL1_A3V (1U << 15)

#define ICC_PMR_PRIORITY_MASK 0xff
#define ICC_BPR_BINARYPOINT_MASK 0x07
#define ICC_IGRPEN_ENABLE 0x01

#define ICC_CTLR_EL3_CBPR_EL1S (1U << 0)
#define ICC_CTLR_EL3_CBPR_EL1NS (1U << 1)
#define ICC_CTLR_EL3_EOIMODE_EL3 (1U << 2)
#define ICC_CTLR_EL3_EOIMODE_EL1S (1U << 3)
#define ICC_CTLR_EL3_EOIMODE_EL1NS (1U << 4)
#define ICC_CTLR_EL3_RM (1U << 5)
#define ICC_CTLR_EL3_PMHE (1U << 6)
#define ICC_CTLR_EL3_PRIBITS_SHIFT 8
#define ICC_CTLR_EL3_IDBITS_SHIFT 11
#define ICC_CTLR_EL3_SEIS (1U << 14)
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)

#define ICH_VMCR_EL2_VENG0_SHIFT 0
#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
#define ICH_VMCR_EL2_VENG1_SHIFT 1
#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
#define ICH_VMCR_EL2_VACKCTL (1U << 2)
#define ICH_VMCR_EL2_VFIQEN (1U << 3)
#define ICH_VMCR_EL2_VCBPR_SHIFT 4
#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
#define ICH_VMCR_EL2_VEOIM_SHIFT 9
#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
#define ICH_VMCR_EL2_VBPR1_SHIFT 18
#define ICH_VMCR_EL2_VBPR1_LENGTH 3
#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
#define ICH_VMCR_EL2_VBPR0_SHIFT 21
#define ICH_VMCR_EL2_VBPR0_LENGTH 3
#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
#define ICH_VMCR_EL2_VPMR_SHIFT 24
#define ICH_VMCR_EL2_VPMR_LENGTH 8
#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)

#define ICH_HCR_EL2_EN (1U << 0)
#define ICH_HCR_EL2_UIE (1U << 1)
#define ICH_HCR_EL2_LRENPIE (1U << 2)
#define ICH_HCR_EL2_NPIE (1U << 3)
#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
#define ICH_HCR_EL2_TC (1U << 10)
#define ICH_HCR_EL2_TALL0 (1U << 11)
#define ICH_HCR_EL2_TALL1 (1U << 12)
#define ICH_HCR_EL2_TSEI (1U << 13)
#define ICH_HCR_EL2_TDIR (1U << 14)
#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)

#define ICH_LR_EL2_VINTID_SHIFT 0
#define ICH_LR_EL2_VINTID_LENGTH 32
#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
#define ICH_LR_EL2_PINTID_SHIFT 32
#define ICH_LR_EL2_PINTID_LENGTH 10
#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
/* Note that EOI shares with the top bit of the pINTID field */
#define ICH_LR_EL2_EOI (1ULL << 41)
#define ICH_LR_EL2_PRIORITY_SHIFT 48
#define ICH_LR_EL2_PRIORITY_LENGTH 8
#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
#define ICH_LR_EL2_GROUP (1ULL << 60)
#define ICH_LR_EL2_HW (1ULL << 61)
#define ICH_LR_EL2_STATE_SHIFT 62
#define ICH_LR_EL2_STATE_LENGTH 2
#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
/* values for the state field: */
#define ICH_LR_EL2_STATE_INVALID 0
#define ICH_LR_EL2_STATE_PENDING 1
#define ICH_LR_EL2_STATE_ACTIVE 2
#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
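
/*
 * Illustrative sketch only: a helper building a list register value for a
 * pending Group 1 virtual interrupt could combine the masks above, e.g.:
 *
 *     uint64_t lr = 0;
 *     lr |= (uint64_t)vintid << ICH_LR_EL2_VINTID_SHIFT;
 *     lr |= (uint64_t)prio << ICH_LR_EL2_PRIORITY_SHIFT;
 *     lr |= ICH_LR_EL2_GROUP;
 *     lr |= ICH_LR_EL2_STATE_PENDING_BIT;
 *
 * with the state later recovered as
 *     (lr & ICH_LR_EL2_STATE_MASK) >> ICH_LR_EL2_STATE_SHIFT
 * and compared against the ICH_LR_EL2_STATE_* values. (vintid and prio are
 * assumed locals; this is not code from the model itself.)
 */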

#define ICH_MISR_EL2_EOI (1U << 0)
#define ICH_MISR_EL2_U (1U << 1)
#define ICH_MISR_EL2_LRENP (1U << 2)
#define ICH_MISR_EL2_NP (1U << 3)
#define ICH_MISR_EL2_VGRP0E (1U << 4)
#define ICH_MISR_EL2_VGRP0D (1U << 5)
#define ICH_MISR_EL2_VGRP1E (1U << 6)
#define ICH_MISR_EL2_VGRP1D (1U << 7)

#define ICH_VTR_EL2_LISTREGS_SHIFT 0
#define ICH_VTR_EL2_TDS (1U << 19)
#define ICH_VTR_EL2_NV4 (1U << 20)
#define ICH_VTR_EL2_A3V (1U << 21)
#define ICH_VTR_EL2_SEIS (1U << 22)
#define ICH_VTR_EL2_IDBITS_SHIFT 23
#define ICH_VTR_EL2_PREBITS_SHIFT 26
#define ICH_VTR_EL2_PRIBITS_SHIFT 29

FIELD(GITS_BASER, SIZE, 0, 8)
FIELD(GITS_BASER, PAGESIZE, 8, 2)
FIELD(GITS_BASER, SHAREABILITY, 10, 2)
FIELD(GITS_BASER, PHYADDR, 12, 36)
FIELD(GITS_BASER, PHYADDRL_64K, 16, 32)
FIELD(GITS_BASER, PHYADDRH_64K, 12, 4)
FIELD(GITS_BASER, ENTRYSIZE, 48, 5)
FIELD(GITS_BASER, OUTERCACHE, 53, 3)
FIELD(GITS_BASER, TYPE, 56, 3)
FIELD(GITS_BASER, INNERCACHE, 59, 3)
FIELD(GITS_BASER, INDIRECT, 62, 1)
FIELD(GITS_BASER, VALID, 63, 1)

FIELD(GITS_CBASER, SIZE, 0, 8)
FIELD(GITS_CBASER, SHAREABILITY, 10, 2)
FIELD(GITS_CBASER, PHYADDR, 12, 40)
FIELD(GITS_CBASER, OUTERCACHE, 53, 3)
FIELD(GITS_CBASER, INNERCACHE, 59, 3)
FIELD(GITS_CBASER, VALID, 63, 1)

FIELD(GITS_CREADR, STALLED, 0, 1)
FIELD(GITS_CREADR, OFFSET, 5, 15)

FIELD(GITS_CWRITER, RETRY, 0, 1)
FIELD(GITS_CWRITER, OFFSET, 5, 15)

FIELD(GITS_CTLR, ENABLED, 0, 1)
FIELD(GITS_CTLR, QUIESCENT, 31, 1)

FIELD(GITS_TYPER, PHYSICAL, 0, 1)
FIELD(GITS_TYPER, VIRTUAL, 1, 1)
FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4)
FIELD(GITS_TYPER, IDBITS, 8, 5)
FIELD(GITS_TYPER, DEVBITS, 13, 5)
FIELD(GITS_TYPER, SEIS, 18, 1)
FIELD(GITS_TYPER, PTA, 19, 1)
FIELD(GITS_TYPER, CIDBITS, 32, 4)
FIELD(GITS_TYPER, CIL, 36, 1)

#define GITS_IDREGS 0xFFD0

#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
                            R_GITS_BASER_TYPE_MASK)

#define GITS_BASER_PAGESIZE_4K 0
#define GITS_BASER_PAGESIZE_16K 1
#define GITS_BASER_PAGESIZE_64K 2

#define GITS_BASER_TYPE_DEVICE 1ULL
#define GITS_BASER_TYPE_VPE 2ULL
#define GITS_BASER_TYPE_COLLECTION 4ULL

#define GITS_PAGE_SIZE_4K 0x1000
#define GITS_PAGE_SIZE_16K 0x4000
#define GITS_PAGE_SIZE_64K 0x10000
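
/*
 * Illustrative sketch only: GITS_BASER.PAGESIZE is an encoding, not a byte
 * count, so a hypothetical decode helper pairing the GITS_BASER_PAGESIZE_*
 * encodings with the GITS_PAGE_SIZE_* sizes might read:
 *
 *     static uint64_t baser_page_size(uint64_t baser)
 *     {
 *         switch (FIELD_EX64(baser, GITS_BASER, PAGESIZE)) {
 *         case GITS_BASER_PAGESIZE_4K:
 *             return GITS_PAGE_SIZE_4K;
 *         case GITS_BASER_PAGESIZE_16K:
 *             return GITS_PAGE_SIZE_16K;
 *         default:
 *             return GITS_PAGE_SIZE_64K;
 *         }
 *     }
 *
 * (baser_page_size is not part of this header; the reserved encoding 3 is
 * folded into the 64K case here.)
 */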

#define L1TABLE_ENTRY_SIZE 8

#define LPI_CTE_ENABLED TABLE_ENTRY_VALID_MASK
#define LPI_PRIORITY_MASK 0xfc

#define GITS_CMDQ_ENTRY_WORDS 4
#define GITS_CMDQ_ENTRY_SIZE (GITS_CMDQ_ENTRY_WORDS * sizeof(uint64_t))

#define CMD_MASK 0xff

#define GITS_CMD_MOVI 0x01
#define GITS_CMD_INT 0x03
#define GITS_CMD_CLEAR 0x04
#define GITS_CMD_SYNC 0x05
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPC 0x09
#define GITS_CMD_MAPTI 0x0A
#define GITS_CMD_MAPI 0x0B
#define GITS_CMD_INV 0x0C
#define GITS_CMD_INVALL 0x0D
#define GITS_CMD_MOVALL 0x0E
#define GITS_CMD_DISCARD 0x0F
#define GITS_CMD_VMOVI 0x21
#define GITS_CMD_VMOVP 0x22
#define GITS_CMD_VSYNC 0x25
#define GITS_CMD_VMAPP 0x29
#define GITS_CMD_VMAPTI 0x2A
#define GITS_CMD_VMAPI 0x2B
#define GITS_CMD_VINVALL 0x2D
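
/*
 * Illustrative note: each ITS command occupies one GITS_CMDQ_ENTRY_SIZE slot
 * in the command queue, and the opcode sits in the low byte of the first
 * 64-bit word. Assuming cmdpkt is a local uint64_t[GITS_CMDQ_ENTRY_WORDS]
 * holding one queue entry, the opcode is recovered with:
 *
 *     uint8_t cmd = cmdpkt[0] & CMD_MASK;
 *
 * and then compared against the GITS_CMD_* values above.
 */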

/* MAPC command fields */
#define ICID_LENGTH 16
#define ICID_MASK ((1U << ICID_LENGTH) - 1)
FIELD(MAPC, RDBASE, 16, 32)

#define RDBASE_PROCNUM_LENGTH 16
#define RDBASE_PROCNUM_MASK ((1ULL << RDBASE_PROCNUM_LENGTH) - 1)

/* MAPD command fields */
#define ITTADDR_LENGTH 44
#define ITTADDR_SHIFT 8
#define ITTADDR_MASK MAKE_64BIT_MASK(ITTADDR_SHIFT, ITTADDR_LENGTH)
#define SIZE_MASK 0x1f

/* MAPI command fields */
#define EVENTID_MASK ((1ULL << 32) - 1)

/* MAPTI command fields */
#define pINTID_SHIFT 32
#define pINTID_MASK MAKE_64BIT_MASK(32, 32)

#define DEVID_SHIFT 32
#define DEVID_MASK MAKE_64BIT_MASK(32, 32)

#define VALID_SHIFT 63
#define CMD_FIELD_VALID_MASK (1ULL << VALID_SHIFT)
#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK (1ULL << 0)

/* MOVALL command fields */
FIELD(MOVALL_2, RDBASE1, 16, 36)
FIELD(MOVALL_3, RDBASE2, 16, 36)

/* MOVI command fields */
FIELD(MOVI_0, DEVICEID, 32, 32)
FIELD(MOVI_1, EVENTID, 0, 32)
FIELD(MOVI_2, ICID, 0, 16)

/* INV command fields */
FIELD(INV_0, DEVICEID, 32, 32)
FIELD(INV_1, EVENTID, 0, 32)

/* VMAPI, VMAPTI command fields */
FIELD(VMAPTI_0, DEVICEID, 32, 32)
FIELD(VMAPTI_1, EVENTID, 0, 32)
FIELD(VMAPTI_1, VPEID, 32, 16)
FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */
FIELD(VMAPTI_2, DOORBELL, 32, 32)

/* VMAPP command fields */
FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */
FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */
FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */
FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */
FIELD(VMAPP_1, VPEID, 32, 16)
FIELD(VMAPP_2, RDBASE, 16, 36)
FIELD(VMAPP_2, V, 63, 1)
FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */
FIELD(VMAPP_3, VPTADDR, 16, 36)

/* VMOVP command fields */
FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */
FIELD(VMOVP_1, VPEID, 32, 16)
FIELD(VMOVP_2, RDBASE, 16, 36)
FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */
FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */

/* VMOVI command fields */
FIELD(VMOVI_0, DEVICEID, 32, 32)
FIELD(VMOVI_1, EVENTID, 0, 32)
FIELD(VMOVI_1, VPEID, 32, 16)
FIELD(VMOVI_2, D, 0, 1)
FIELD(VMOVI_2, DOORBELL, 32, 32)

/* VINVALL command fields */
FIELD(VINVALL_1, VPEID, 32, 16)

/*
 * 12 bytes Interrupt Translation Table Entry size,
 * as per Table 5.3 in the GICv3 spec.
 *
 * ITE lower 8 bytes:
 * Bits:    | 63 ... 48 | 47 ... 32 | 31 ... 26 | 25 ... 2 |   1     |   0   |
 * Values:  | vPEID     | ICID      | unused    | IntNum   | IntType | Valid |
 *
 * ITE upper 4 bytes:
 * Bits:    | 31 ... 25 | 24 ... 0 |
 * Values:  | unused    | Doorbell |
 *
 * (When Doorbell is unused, as it always is for INTTYPE_PHYSICAL,
 * the value of that field in memory cannot be relied upon -- older
 * versions of QEMU did not correctly write to that memory.)
 */
#define ITS_ITT_ENTRY_SIZE 0xC

FIELD(ITE_L, VALID, 0, 1)
FIELD(ITE_L, INTTYPE, 1, 1)
FIELD(ITE_L, INTID, 2, 24)
FIELD(ITE_L, ICID, 32, 16)
FIELD(ITE_L, VPEID, 48, 16)
FIELD(ITE_H, DOORBELL, 0, 24)
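
/*
 * Illustrative sketch only: the ITE layout above is read and written with
 * the FIELD_EX64()/FIELD_DP64() helpers. Packing the lower word of a
 * physical-interrupt ITE, for instance, might look like:
 *
 *     uint64_t itel = 0;
 *     itel = FIELD_DP64(itel, ITE_L, VALID, 1);
 *     itel = FIELD_DP64(itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
 *     itel = FIELD_DP64(itel, ITE_L, INTID, intid);
 *     itel = FIELD_DP64(itel, ITE_L, ICID, icid);
 *
 * (intid and icid are assumed locals; ITE_INTTYPE_PHYSICAL is defined just
 * below.)
 */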

/* Possible values for ITE_L INTTYPE */
#define ITE_INTTYPE_VIRTUAL 0
#define ITE_INTTYPE_PHYSICAL 1

/* 16 bits EventId */
#define ITS_IDBITS GICD_TYPER_IDBITS

/* 16 bits DeviceId */
#define ITS_DEVBITS 0xF

/* 16 bits CollectionId */
#define ITS_CIDBITS 0xF

/*
 * 8 bytes Device Table Entry size:
 * Valid = 1 bit, ITTAddr = 44 bits, Size = 5 bits
 */
#define GITS_DTE_SIZE (0x8ULL)

FIELD(DTE, VALID, 0, 1)
FIELD(DTE, SIZE, 1, 5)
FIELD(DTE, ITTADDR, 6, 44)

/*
 * 8 bytes Collection Table Entry size:
 * Valid = 1 bit, RDBase = 16 bits
 */
#define GITS_CTE_SIZE (0x8ULL)
FIELD(CTE, VALID, 0, 1)
FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)
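
/*
 * Illustrative sketch only: a collection table entry read back from guest
 * memory decodes with the same helpers, e.g.:
 *
 *     bool valid = FIELD_EX64(cte, CTE, VALID);
 *     uint32_t rdbase = FIELD_EX64(cte, CTE, RDBASE);
 *
 * where cte is assumed to be a uint64_t holding the entry and RDBASE names
 * the target redistributor as a processor number (the GITS_TYPER.PTA == 0
 * convention).
 */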

/*
 * 8 bytes VPE table entry size:
 * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits
 *
 * Field sizes for Valid and size are mandated; field sizes for RDbase
 * and VPT_addr are IMPDEF.
 */
#define GITS_VPE_SIZE 0x8ULL

FIELD(VTE, VALID, 0, 1)
FIELD(VTE, VPTSIZE, 1, 5)
FIELD(VTE, VPTADDR, 6, 36)
FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)

/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
#define INTID_SPURIOUS 1023

/* Functions internal to the emulated GICv3 */

/*
 * Return the size of the redistributor register frame in bytes
 * (which depends on what GIC version this is)
 */
static inline int gicv3_redist_size(GICv3State *s)
{
    /*
     * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS.
     * It's the same for every redistributor in the GIC, so arbitrarily
     * use the register field in the first one.
     */
    if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) {
        return GICV4_REDIST_SIZE;
    } else {
        return GICV3_REDIST_SIZE;
    }
}

/*
 * gicv3_intid_is_special:
 * @intid: interrupt ID
 *
 * Return true if @intid is a special interrupt ID (1020 to
 * 1023 inclusive). This corresponds to the GIC spec pseudocode
 * IsSpecial() function.
 */
static inline bool gicv3_intid_is_special(int intid)
{
    return intid >= INTID_SECURE && intid <= INTID_SPURIOUS;
}

/*
 * gicv3_redist_update:
 * @cs: GICv3CPUState for this redistributor
 *
 * Recalculate the highest priority pending interrupt after a
 * change to redistributor state, and inform the CPU accordingly.
 */
void gicv3_redist_update(GICv3CPUState *cs);

/*
 * gicv3_update:
 * @s: GICv3State for this GIC
 * @start: first interrupt whose state changed
 * @len: length of the range of interrupts whose state changed
 *
 * Recalculate the highest priority pending interrupts after a
 * change to the distributor state affecting @len interrupts
 * starting at @start, and inform the CPUs accordingly.
 */
void gicv3_update(GICv3State *s, int start, int len);

/*
 * gicv3_full_update_noirqset:
 * @s: GICv3State for this GIC
 *
 * Recalculate the cached information about highest priority
 * pending interrupts, but don't inform the CPUs. This should be
 * called after an incoming migration has loaded new state.
 */
void gicv3_full_update_noirqset(GICv3State *s);

/*
 * gicv3_full_update:
 * @s: GICv3State for this GIC
 *
 * Recalculate the highest priority pending interrupts after
 * a change that could affect the status of all interrupts,
 * and inform the CPUs accordingly.
 */
void gicv3_full_update(GICv3State *s);

MemTxResult gicv3_dist_read(void *opaque, hwaddr offset, uint64_t *data,
                            unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_dist_write(void *opaque, hwaddr addr, uint64_t data,
                             unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs);
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs);
void gicv3_dist_set_irq(GICv3State *s, int irq, int level);
void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level);
void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level);

/*
 * gicv3_redist_process_vlpi:
 * @cs: GICv3CPUState for this redistributor
 * @irq: (virtual) interrupt number
 * @vptaddr: (guest) address of VLPI table
 * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell")
 * @level: level to set @irq to
 *
 * Process a virtual LPI being directly injected by the ITS. This function
 * will update the VLPI table specified by @vptaddr. If the
 * vCPU corresponding to that VLPI table is currently running on
 * the CPU associated with this redistributor, directly inject the VLPI
 * @irq. If the vCPU is not running on this CPU, raise the doorbell
 * interrupt instead.
 */
void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr,
                               int doorbell, int level);

/*
 * gicv3_redist_vlpi_pending:
 * @cs: GICv3CPUState for this redistributor
 * @irq: (virtual) interrupt number
 * @level: level to set @irq to
 *
 * Set/clear the pending status of a virtual LPI in the vLPI table
 * that this redistributor is currently using. (The difference between
 * this and gicv3_redist_process_vlpi() is that this is called from
 * the cpuif and does not need to do the not-running-on-this-vcpu checks.)
 */
void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level);

void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level);

/*
 * gicv3_redist_update_lpi:
 * @cs: GICv3CPUState for this redistributor
 *
 * Scan the LPI pending table and recalculate the highest priority
 * pending LPI and also the overall highest priority pending interrupt.
 */
void gicv3_redist_update_lpi(GICv3CPUState *cs);

/*
 * gicv3_redist_update_lpi_only:
 * @cs: GICv3CPUState for this redistributor
 *
 * Scan the LPI pending table and recalculate cs->hpplpi only,
 * without calling gicv3_redist_update() to recalculate the overall
 * highest priority pending interrupt. This should be called after
 * an incoming migration has loaded new state.
 */
void gicv3_redist_update_lpi_only(GICv3CPUState *cs);

/*
 * gicv3_redist_inv_lpi:
 * @cs: GICv3CPUState for this redistributor
 * @irq: LPI to invalidate cached information for
 *
 * Forget or update any cached information associated with this LPI.
 */
void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq);

/*
 * gicv3_redist_inv_vlpi:
 * @cs: GICv3CPUState for this redistributor
 * @irq: vLPI to invalidate cached information for
 * @vptaddr: (guest) address of vLPI table
 *
 * Forget or update any cached information associated with this vLPI.
 */
void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr);

/*
 * gicv3_redist_mov_lpi:
 * @src: source redistributor
 * @dest: destination redistributor
 * @irq: LPI to update
 *
 * Move the pending state of the specified LPI from @src to @dest,
 * as required by the ITS MOVI command.
 */
void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq);

/*
 * gicv3_redist_movall_lpis:
 * @src: source redistributor
 * @dest: destination redistributor
 *
 * Scan the LPI pending table for @src, and for each pending LPI there
 * mark it as not-pending for @src and pending for @dest, as required
 * by the ITS MOVALL command.
 */
void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest);

/*
 * gicv3_redist_mov_vlpi:
 * @src: source redistributor
 * @src_vptaddr: (guest) address of source VLPI table
 * @dest: destination redistributor
 * @dest_vptaddr: (guest) address of destination VLPI table
 * @irq: VLPI to update
 * @doorbell: doorbell for destination (1023 for "no doorbell")
 *
 * Move the pending state of the specified VLPI from @src to @dest,
 * as required by the ITS VMOVI command.
 */
void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr,
                           GICv3CPUState *dest, uint64_t dest_vptaddr,
                           int irq, int doorbell);

/*
 * gicv3_redist_vinvall:
 * @cs: GICv3CPUState for this redistributor
 * @vptaddr: address of VLPI pending table
 *
 * On redistributor @cs, invalidate all cached information associated
 * with the vCPU defined by @vptaddr.
 */
void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr);

void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns);
void gicv3_init_cpuif(GICv3State *s);

/*
 * gicv3_cpuif_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the IRQ or FIQ lines after a change
 * to the current highest priority pending interrupt, the CPU's
 * current running priority or the CPU's current exception level or
 * security state.
 */
void gicv3_cpuif_update(GICv3CPUState *cs);

/*
 * gicv3_cpuif_virt_irq_fiq_update:
 * @cs: GICv3CPUState for the CPU to update
 *
 * Recalculate whether to assert the virtual IRQ or FIQ lines after
 * a change to the current highest priority pending virtual interrupt.
 * Note that this does not recalculate and change the maintenance
 * interrupt status (for that, see gicv3_cpuif_virt_update()).
 */
void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs);

static inline uint32_t gicv3_iidr(void)
{
    /* Return the Implementer Identification Register value
     * for the emulated GICv3, as reported in GICD_IIDR and GICR_IIDR.
     *
     * We claim to be an ARM r0p0 with a zero ProductID.
     * This is the same as an r0p0 GIC-500.
     */
    return 0x43b;
}

/* CoreSight PIDR0 values for ARM GICv3 implementations */
#define GICV3_PIDR0_DIST 0x92
#define GICV3_PIDR0_REDIST 0x93
#define GICV3_PIDR0_ITS 0x94

static inline uint32_t gicv3_idreg(int regoffset, uint8_t pidr0)
{
    /* Return the value of the CoreSight ID register at the specified
     * offset from the first ID register (as found in the distributor
     * and redistributor register banks).
     * These values indicate an ARM implementation of a GICv3.
     */
    static const uint8_t gicd_ids[] = {
        0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1
    };

    regoffset /= 4;

    if (regoffset == 4) {
        return pidr0;
    }
    return gicd_ids[regoffset];
}

/*
 * gicv3_irq_group:
 *
 * Return the group which this interrupt is configured as (GICV3_G0,
 * GICV3_G1 or GICV3_G1NS).
 */
static inline int gicv3_irq_group(GICv3State *s, GICv3CPUState *cs, int irq)
{
    bool grpbit, grpmodbit;

    if (irq < GIC_INTERNAL) {
        grpbit = extract32(cs->gicr_igroupr0, irq, 1);
        grpmodbit = extract32(cs->gicr_igrpmodr0, irq, 1);
    } else {
        grpbit = gicv3_gicd_group_test(s, irq);
        grpmodbit = gicv3_gicd_grpmod_test(s, irq);
    }
    if (grpbit) {
        return GICV3_G1NS;
    }
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        return GICV3_G0;
    }
    return grpmodbit ? GICV3_G1 : GICV3_G0;
}

/*
 * gicv3_redist_affid:
 *
 * Return the 32-bit affinity ID of the CPU connected to this redistributor
 */
static inline uint32_t gicv3_redist_affid(GICv3CPUState *cs)
{
    return cs->gicr_typer >> 32;
}
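
/*
 * Illustrative note: GICR_TYPER bits [63:32] hold the Aff3.Aff2.Aff1.Aff0
 * affinity value, so individual affinity levels can be pulled out of the
 * value returned above with extract32() (the same helper used elsewhere in
 * this header), e.g.:
 *
 *     uint32_t affid = gicv3_redist_affid(cs);
 *     uint8_t aff0 = extract32(affid, 0, 8);
 *     uint8_t aff3 = extract32(affid, 24, 8);
 */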

/*
 * gicv3_cache_target_cpustate:
 *
 * Update the cached CPU state corresponding to the target for this interrupt
 * (which is kept in s->gicd_irouter_target[]).
 */
static inline void gicv3_cache_target_cpustate(GICv3State *s, int irq)
{
    GICv3CPUState *cs = NULL;
    int i;
    uint32_t tgtaff = extract64(s->gicd_irouter[irq], 0, 24) |
        extract64(s->gicd_irouter[irq], 32, 8) << 24;

    for (i = 0; i < s->num_cpu; i++) {
        if (s->cpu[i].gicr_typer >> 32 == tgtaff) {
            cs = &s->cpu[i];
            break;
        }
    }

    s->gicd_irouter_target[irq] = cs;
}

/*
 * gicv3_cache_all_target_cpustates:
 *
 * Populate the entire cache of CPU state pointers for interrupt targets
 * (eg after inbound migration or CPU reset)
 */
static inline void gicv3_cache_all_target_cpustates(GICv3State *s)
{
    int irq;

    for (irq = GIC_INTERNAL; irq < GICV3_MAXIRQ; irq++) {
        gicv3_cache_target_cpustate(s, irq);
    }
}

void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s);

#endif /* QEMU_ARM_GICV3_INTERNAL_H */