/*
 * ARM SMMUv3 support - Internal API
 *
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HW_ARM_SMMUV3_INTERNAL_H
#define HW_ARM_SMMUV3_INTERNAL_H

#include "hw/arm/smmu-common.h"

typedef enum SMMUTranslationStatus {
    SMMU_TRANS_DISABLE,
    SMMU_TRANS_ABORT,
    SMMU_TRANS_BYPASS,
    SMMU_TRANS_ERROR,
    SMMU_TRANS_SUCCESS,
} SMMUTranslationStatus;

/* MMIO Registers */

REG32(IDR0,                0x0)
    FIELD(IDR0, S1P,         1 , 1)
    FIELD(IDR0, TTF,         2 , 2)
    FIELD(IDR0, COHACC,      4 , 1)
    FIELD(IDR0, ASID16,      12, 1)
    FIELD(IDR0, TTENDIAN,    21, 2)
    FIELD(IDR0, STALL_MODEL, 24, 2)
    FIELD(IDR0, TERM_MODEL,  26, 1)
    FIELD(IDR0, STLEVEL,     27, 2)

REG32(IDR1,                0x4)
    FIELD(IDR1, SIDSIZE,      0 , 6)
    FIELD(IDR1, EVENTQS,      16, 5)
    FIELD(IDR1, CMDQS,        21, 5)

#define SMMU_IDR1_SIDSIZE 16
#define SMMU_CMDQS   19
#define SMMU_EVENTQS 19

REG32(IDR2,                0x8)
REG32(IDR3,                0xc)
REG32(IDR4,                0x10)
REG32(IDR5,                0x14)
    FIELD(IDR5, OAS,         0, 3);
    FIELD(IDR5, GRAN4K,      4, 1);
    FIELD(IDR5, GRAN16K,     5, 1);
    FIELD(IDR5, GRAN64K,     6, 1);

#define SMMU_IDR5_OAS 4

REG32(IIDR,                0x1c)
REG32(CR0,                 0x20)
    FIELD(CR0, SMMU_ENABLE,   0, 1)
    FIELD(CR0, EVENTQEN,      2, 1)
    FIELD(CR0, CMDQEN,        3, 1)

#define SMMU_CR0_RESERVED 0xFFFFFC20

REG32(CR0ACK,              0x24)
REG32(CR1,                 0x28)
REG32(CR2,                 0x2c)
REG32(STATUSR,             0x40)
REG32(IRQ_CTRL,            0x50)
    FIELD(IRQ_CTRL, GERROR_IRQEN,        0, 1)
    FIELD(IRQ_CTRL, PRI_IRQEN,           1, 1)
    FIELD(IRQ_CTRL, EVENTQ_IRQEN,        2, 1)

REG32(IRQ_CTRL_ACK,        0x54)
REG32(GERROR,              0x60)
    FIELD(GERROR, CMDQ_ERR,           0, 1)
    FIELD(GERROR, EVENTQ_ABT_ERR,     2, 1)
    FIELD(GERROR, PRIQ_ABT_ERR,       3, 1)
    FIELD(GERROR, MSI_CMDQ_ABT_ERR,   4, 1)
    FIELD(GERROR, MSI_EVENTQ_ABT_ERR, 5, 1)
    FIELD(GERROR, MSI_PRIQ_ABT_ERR,   6, 1)
    FIELD(GERROR, MSI_GERROR_ABT_ERR, 7, 1)
    FIELD(GERROR, MSI_SFM_ERR,        8, 1)

REG32(GERRORN,             0x64)

#define A_GERROR_IRQ_CFG0  0x68 /* 64b */
REG32(GERROR_IRQ_CFG1,     0x70)
REG32(GERROR_IRQ_CFG2,     0x74)

#define A_STRTAB_BASE      0x80 /* 64b */

#define SMMU_BASE_ADDR_MASK 0xfffffffffffc0

REG32(STRTAB_BASE_CFG,     0x88)
    FIELD(STRTAB_BASE_CFG, FMT,      16, 2)
    FIELD(STRTAB_BASE_CFG, SPLIT,    6 , 5)
    FIELD(STRTAB_BASE_CFG, LOG2SIZE, 0 , 6)

#define A_CMDQ_BASE        0x90 /* 64b */
REG32(CMDQ_PROD,           0x98)
REG32(CMDQ_CONS,           0x9c)
    FIELD(CMDQ_CONS, ERR, 24, 7)

#define A_EVENTQ_BASE      0xa0 /* 64b */
REG32(EVENTQ_PROD,         0xa8)
REG32(EVENTQ_CONS,         0xac)

#define A_EVENTQ_IRQ_CFG0  0xb0 /* 64b */
REG32(EVENTQ_IRQ_CFG1,     0xb8)
REG32(EVENTQ_IRQ_CFG2,     0xbc)

#define A_IDREGS           0xfd0

static inline int smmu_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, SMMU_ENABLE);
}

/* Command Queue Entry */
typedef struct Cmd {
    uint32_t word[4];
} Cmd;

/* Event Queue Entry */
typedef struct Evt {
    uint32_t word[8];
} Evt;

static inline uint32_t smmuv3_idreg(int regoffset)
{
    /*
     * Return the value of the Primecell/Corelink ID registers at the
     * specified offset from the first ID register.
     * These values indicate an ARM implementation of MMU600 p1.
     */
    static const uint8_t smmuv3_ids[] = {
        0x04, 0, 0, 0, 0x84, 0xB4, 0xF0, 0x10, 0x0D, 0xF0, 0x05, 0xB1
    };
    return smmuv3_ids[regoffset / 4];
}
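
/*
 * Example (illustrative): a 32-bit read at offset A_IDREGS + 0x1c gives
 * regoffset = 0x1c, so the helper returns smmuv3_ids[0x1c / 4] = 0x10.
 * Routing guest accesses within the A_IDREGS window to this helper is
 * left to the caller, typically the MMIO read handler in smmuv3.c.
 */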

static inline bool smmuv3_eventq_irq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, EVENTQ_IRQEN);
}

static inline bool smmuv3_gerror_irq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN);
}

/* Queue Handling */

#define Q_BASE(q)          ((q)->base & SMMU_BASE_ADDR_MASK)
#define WRAP_MASK(q)       (1 << (q)->log2size)
#define INDEX_MASK(q)      (((1 << (q)->log2size)) - 1)
#define WRAP_INDEX_MASK(q) ((1 << ((q)->log2size + 1)) - 1)

#define Q_CONS(q) ((q)->cons & INDEX_MASK(q))
#define Q_PROD(q) ((q)->prod & INDEX_MASK(q))

#define Q_CONS_ENTRY(q)  (Q_BASE(q) + (q)->entry_size * Q_CONS(q))
#define Q_PROD_ENTRY(q)  (Q_BASE(q) + (q)->entry_size * Q_PROD(q))

#define Q_CONS_WRAP(q) (((q)->cons & WRAP_MASK(q)) >> (q)->log2size)
#define Q_PROD_WRAP(q) (((q)->prod & WRAP_MASK(q)) >> (q)->log2size)
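
/*
 * Worked example (illustrative): with log2size == 2 the queue holds four
 * entries; bits [1:0] of prod/cons are the entry index and bit [2] is the
 * wrap flag.  The queue is full when the indexes match but the wrap flags
 * differ ((cons ^ prod) == WRAP_MASK), and empty when index and wrap flag
 * are both equal, which is exactly what the helpers below test.
 */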

static inline bool smmuv3_q_full(SMMUQueue *q)
{
    return ((q->cons ^ q->prod) & WRAP_INDEX_MASK(q)) == WRAP_MASK(q);
}

static inline bool smmuv3_q_empty(SMMUQueue *q)
{
    return (q->cons & WRAP_INDEX_MASK(q)) == (q->prod & WRAP_INDEX_MASK(q));
}

static inline void queue_prod_incr(SMMUQueue *q)
{
    q->prod = (q->prod + 1) & WRAP_INDEX_MASK(q);
}

static inline void queue_cons_incr(SMMUQueue *q)
{
    /*
     * We have to use deposit for the CONS registers to preserve
     * the ERR field in the high bits.
     */
    q->cons = deposit32(q->cons, 0, q->log2size + 1, q->cons + 1);
}

static inline bool smmuv3_cmdq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, CMDQEN);
}

static inline bool smmuv3_eventq_enabled(SMMUv3State *s)
{
    return FIELD_EX32(s->cr[0], CR0, EVENTQEN);
}

static inline void smmu_write_cmdq_err(SMMUv3State *s, uint32_t err_type)
{
    s->cmdq.cons = FIELD_DP32(s->cmdq.cons, CMDQ_CONS, ERR, err_type);
}

/* Commands */

typedef enum SMMUCommandType {
    SMMU_CMD_NONE            = 0x00,
    SMMU_CMD_PREFETCH_CONFIG,
    SMMU_CMD_PREFETCH_ADDR,
    SMMU_CMD_CFGI_STE,
    SMMU_CMD_CFGI_STE_RANGE,
    SMMU_CMD_CFGI_CD,
    SMMU_CMD_CFGI_CD_ALL,
    SMMU_CMD_CFGI_ALL,
    SMMU_CMD_TLBI_NH_ALL     = 0x10,
    SMMU_CMD_TLBI_NH_ASID,
    SMMU_CMD_TLBI_NH_VA,
    SMMU_CMD_TLBI_NH_VAA,
    SMMU_CMD_TLBI_EL3_ALL    = 0x18,
    SMMU_CMD_TLBI_EL3_VA     = 0x1a,
    SMMU_CMD_TLBI_EL2_ALL    = 0x20,
    SMMU_CMD_TLBI_EL2_ASID,
    SMMU_CMD_TLBI_EL2_VA,
    SMMU_CMD_TLBI_EL2_VAA,
    SMMU_CMD_TLBI_S12_VMALL  = 0x28,
    SMMU_CMD_TLBI_S2_IPA     = 0x2a,
    SMMU_CMD_TLBI_NSNH_ALL   = 0x30,
    SMMU_CMD_ATC_INV         = 0x40,
    SMMU_CMD_PRI_RESP,
    SMMU_CMD_RESUME          = 0x44,
    SMMU_CMD_STALL_TERM,
    SMMU_CMD_SYNC,
} SMMUCommandType;

static const char *cmd_stringify[] = {
    [SMMU_CMD_PREFETCH_CONFIG] = "SMMU_CMD_PREFETCH_CONFIG",
    [SMMU_CMD_PREFETCH_ADDR]   = "SMMU_CMD_PREFETCH_ADDR",
    [SMMU_CMD_CFGI_STE]        = "SMMU_CMD_CFGI_STE",
    [SMMU_CMD_CFGI_STE_RANGE]  = "SMMU_CMD_CFGI_STE_RANGE",
    [SMMU_CMD_CFGI_CD]         = "SMMU_CMD_CFGI_CD",
    [SMMU_CMD_CFGI_CD_ALL]     = "SMMU_CMD_CFGI_CD_ALL",
    [SMMU_CMD_CFGI_ALL]        = "SMMU_CMD_CFGI_ALL",
    [SMMU_CMD_TLBI_NH_ALL]     = "SMMU_CMD_TLBI_NH_ALL",
    [SMMU_CMD_TLBI_NH_ASID]    = "SMMU_CMD_TLBI_NH_ASID",
    [SMMU_CMD_TLBI_NH_VA]      = "SMMU_CMD_TLBI_NH_VA",
    [SMMU_CMD_TLBI_NH_VAA]     = "SMMU_CMD_TLBI_NH_VAA",
    [SMMU_CMD_TLBI_EL3_ALL]    = "SMMU_CMD_TLBI_EL3_ALL",
    [SMMU_CMD_TLBI_EL3_VA]     = "SMMU_CMD_TLBI_EL3_VA",
    [SMMU_CMD_TLBI_EL2_ALL]    = "SMMU_CMD_TLBI_EL2_ALL",
    [SMMU_CMD_TLBI_EL2_ASID]   = "SMMU_CMD_TLBI_EL2_ASID",
    [SMMU_CMD_TLBI_EL2_VA]     = "SMMU_CMD_TLBI_EL2_VA",
    [SMMU_CMD_TLBI_EL2_VAA]    = "SMMU_CMD_TLBI_EL2_VAA",
    [SMMU_CMD_TLBI_S12_VMALL]  = "SMMU_CMD_TLBI_S12_VMALL",
    [SMMU_CMD_TLBI_S2_IPA]     = "SMMU_CMD_TLBI_S2_IPA",
    [SMMU_CMD_TLBI_NSNH_ALL]   = "SMMU_CMD_TLBI_NSNH_ALL",
    [SMMU_CMD_ATC_INV]         = "SMMU_CMD_ATC_INV",
    [SMMU_CMD_PRI_RESP]        = "SMMU_CMD_PRI_RESP",
    [SMMU_CMD_RESUME]          = "SMMU_CMD_RESUME",
    [SMMU_CMD_STALL_TERM]      = "SMMU_CMD_STALL_TERM",
    [SMMU_CMD_SYNC]            = "SMMU_CMD_SYNC",
};

static inline const char *smmu_cmd_string(SMMUCommandType type)
{
    if (type > SMMU_CMD_NONE && type < ARRAY_SIZE(cmd_stringify)) {
        return cmd_stringify[type] ? cmd_stringify[type] : "UNKNOWN";
    } else {
        return "INVALID";
    }
}

/* CMDQ fields */

typedef enum {
    SMMU_CERROR_NONE = 0,
    SMMU_CERROR_ILL,
    SMMU_CERROR_ABT,
    SMMU_CERROR_ATC_INV_SYNC,
} SMMUCmdError;

enum { /* Command completion notification */
    CMD_SYNC_SIG_NONE,
    CMD_SYNC_SIG_IRQ,
    CMD_SYNC_SIG_SEV,
};

#define CMD_TYPE(x)         extract32((x)->word[0], 0 , 8)
#define CMD_SSEC(x)         extract32((x)->word[0], 10, 1)
#define CMD_SSV(x)          extract32((x)->word[0], 11, 1)
#define CMD_RESUME_AC(x)    extract32((x)->word[0], 12, 1)
#define CMD_RESUME_AB(x)    extract32((x)->word[0], 13, 1)
#define CMD_SYNC_CS(x)      extract32((x)->word[0], 12, 2)
#define CMD_SSID(x)         extract32((x)->word[0], 12, 20)
#define CMD_SID(x)          ((x)->word[1])
#define CMD_VMID(x)         extract32((x)->word[1], 0 , 16)
#define CMD_ASID(x)         extract32((x)->word[1], 16, 16)
#define CMD_RESUME_STAG(x)  extract32((x)->word[2], 0 , 16)
#define CMD_RESP(x)         extract32((x)->word[2], 11, 2)
#define CMD_LEAF(x)         extract32((x)->word[2], 0 , 1)
#define CMD_STE_RANGE(x)    extract32((x)->word[2], 0 , 5)
#define CMD_ADDR(x) ({                                      \
            uint64_t high = (uint64_t)(x)->word[3];         \
            uint64_t low = extract32((x)->word[2], 12, 20); \
            uint64_t addr = high << 32 | (low << 12);       \
            addr;                                           \
        })
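
/*
 * CMD_ADDR rebuilds the 64-bit address carried by address-based commands
 * (e.g. CMD_TLBI_NH_VA): bits [63:32] come from word[3], bits [31:12]
 * from word[2], and the low 12 bits are implicitly zero (4K aligned).
 */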

#define SMMU_FEATURE_2LVL_STE (1 << 0)

/* Events */

typedef enum SMMUEventType {
    SMMU_EVT_NONE               = 0x00,
    SMMU_EVT_F_UUT,
    SMMU_EVT_C_BAD_STREAMID,
    SMMU_EVT_F_STE_FETCH,
    SMMU_EVT_C_BAD_STE,
    SMMU_EVT_F_BAD_ATS_TREQ,
    SMMU_EVT_F_STREAM_DISABLED,
    SMMU_EVT_F_TRANS_FORBIDDEN,
    SMMU_EVT_C_BAD_SUBSTREAMID,
    SMMU_EVT_F_CD_FETCH,
    SMMU_EVT_C_BAD_CD,
    SMMU_EVT_F_WALK_EABT,
    SMMU_EVT_F_TRANSLATION      = 0x10,
    SMMU_EVT_F_ADDR_SIZE,
    SMMU_EVT_F_ACCESS,
    SMMU_EVT_F_PERMISSION,
    SMMU_EVT_F_TLB_CONFLICT     = 0x20,
    SMMU_EVT_F_CFG_CONFLICT,
    SMMU_EVT_E_PAGE_REQ         = 0x24,
} SMMUEventType;

static const char *event_stringify[] = {
    [SMMU_EVT_NONE]              = "no recorded event",
    [SMMU_EVT_F_UUT]             = "SMMU_EVT_F_UUT",
    [SMMU_EVT_C_BAD_STREAMID]    = "SMMU_EVT_C_BAD_STREAMID",
    [SMMU_EVT_F_STE_FETCH]       = "SMMU_EVT_F_STE_FETCH",
    [SMMU_EVT_C_BAD_STE]         = "SMMU_EVT_C_BAD_STE",
    [SMMU_EVT_F_BAD_ATS_TREQ]    = "SMMU_EVT_F_BAD_ATS_TREQ",
    [SMMU_EVT_F_STREAM_DISABLED] = "SMMU_EVT_F_STREAM_DISABLED",
    [SMMU_EVT_F_TRANS_FORBIDDEN] = "SMMU_EVT_F_TRANS_FORBIDDEN",
    [SMMU_EVT_C_BAD_SUBSTREAMID] = "SMMU_EVT_C_BAD_SUBSTREAMID",
    [SMMU_EVT_F_CD_FETCH]        = "SMMU_EVT_F_CD_FETCH",
    [SMMU_EVT_C_BAD_CD]          = "SMMU_EVT_C_BAD_CD",
    [SMMU_EVT_F_WALK_EABT]       = "SMMU_EVT_F_WALK_EABT",
    [SMMU_EVT_F_TRANSLATION]     = "SMMU_EVT_F_TRANSLATION",
    [SMMU_EVT_F_ADDR_SIZE]       = "SMMU_EVT_F_ADDR_SIZE",
    [SMMU_EVT_F_ACCESS]          = "SMMU_EVT_F_ACCESS",
    [SMMU_EVT_F_PERMISSION]      = "SMMU_EVT_F_PERMISSION",
    [SMMU_EVT_F_TLB_CONFLICT]    = "SMMU_EVT_F_TLB_CONFLICT",
    [SMMU_EVT_F_CFG_CONFLICT]    = "SMMU_EVT_F_CFG_CONFLICT",
    [SMMU_EVT_E_PAGE_REQ]        = "SMMU_EVT_E_PAGE_REQ",
};

static inline const char *smmu_event_string(SMMUEventType type)
{
    if (type < ARRAY_SIZE(event_stringify)) {
        return event_stringify[type] ? event_stringify[type] : "UNKNOWN";
    } else {
        return "INVALID";
    }
}

/* Encode an event record */
typedef struct SMMUEventInfo {
    SMMUEventType type;
    uint32_t sid;
    bool recorded;
    bool record_trans_faults;
    bool inval_ste_allowed;
    union {
        struct {
            uint32_t ssid;
            bool ssv;
            dma_addr_t addr;
            bool rnw;
            bool pnu;
            bool ind;
        } f_uut;
        struct SSIDInfo {
            uint32_t ssid;
            bool ssv;
        } c_bad_streamid;
        struct SSIDAddrInfo {
            uint32_t ssid;
            bool ssv;
            dma_addr_t addr;
        } f_ste_fetch;
        struct SSIDInfo c_bad_ste;
        struct {
            dma_addr_t addr;
            bool rnw;
        } f_transl_forbidden;
        struct {
            uint32_t ssid;
        } c_bad_substream;
        struct SSIDAddrInfo f_cd_fetch;
        struct SSIDInfo c_bad_cd;
        struct FullInfo {
            bool stall;
            uint16_t stag;
            uint32_t ssid;
            bool ssv;
            bool s2;
            dma_addr_t addr;
            bool rnw;
            bool pnu;
            bool ind;
            uint8_t class;
            dma_addr_t addr2;
        } f_walk_eabt;
        struct FullInfo f_translation;
        struct FullInfo f_addr_size;
        struct FullInfo f_access;
        struct FullInfo f_permission;
        struct SSIDInfo f_cfg_conflict;
        /*
         * not supported yet:
         * F_BAD_ATS_TREQ
         * F_TLB_CONFLICT
         * E_PAGE_REQUEST
         * IMPDEF_EVENTn
         */
    } u;
} SMMUEventInfo;

/* EVTQ fields */

#define EVT_Q_OVERFLOW        (1 << 31)

#define EVT_SET_TYPE(x, v)  ((x)->word[0] = deposit32((x)->word[0], 0 , 8 , v))
#define EVT_SET_SSV(x, v)   ((x)->word[0] = deposit32((x)->word[0], 11, 1 , v))
#define EVT_SET_SSID(x, v)  ((x)->word[0] = deposit32((x)->word[0], 12, 20, v))
#define EVT_SET_SID(x, v)   ((x)->word[1] = v)
#define EVT_SET_STAG(x, v)  ((x)->word[2] = deposit32((x)->word[2], 0 , 16, v))
#define EVT_SET_STALL(x, v) ((x)->word[2] = deposit32((x)->word[2], 31, 1 , v))
#define EVT_SET_PNU(x, v)   ((x)->word[3] = deposit32((x)->word[3], 1 , 1 , v))
#define EVT_SET_IND(x, v)   ((x)->word[3] = deposit32((x)->word[3], 2 , 1 , v))
#define EVT_SET_RNW(x, v)   ((x)->word[3] = deposit32((x)->word[3], 3 , 1 , v))
#define EVT_SET_S2(x, v)    ((x)->word[3] = deposit32((x)->word[3], 7 , 1 , v))
#define EVT_SET_CLASS(x, v) ((x)->word[3] = deposit32((x)->word[3], 8 , 2 , v))
#define EVT_SET_ADDR(x, addr)                             \
    do {                                                  \
            (x)->word[5] = (uint32_t)(addr >> 32);        \
            (x)->word[4] = (uint32_t)(addr & 0xffffffff); \
        } while (0)
#define EVT_SET_ADDR2(x, addr)                            \
    do {                                                  \
            (x)->word[7] = (uint32_t)(addr >> 32);        \
            (x)->word[6] = (uint32_t)(addr & 0xffffffff); \
        } while (0)

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *event);
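
/*
 * Sketch (illustrative; the real encoding lives in smmuv3_record_event()
 * in smmuv3.c) of how an SMMUEventInfo is turned into an event record
 * with the setters above:
 *
 *     Evt evt = {};
 *     EVT_SET_TYPE(&evt, event->type);
 *     EVT_SET_SID(&evt, event->sid);
 *     EVT_SET_ADDR(&evt, event->u.f_translation.addr);
 *
 * The record is then written at the event queue's producer slot
 * (Q_PROD_ENTRY) and queue_prod_incr() advances the index, provided the
 * queue is enabled and not full.
 */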

/* Configuration Data */

/* STE Level 1 Descriptor */
typedef struct STEDesc {
    uint32_t word[2];
} STEDesc;

/* CD Level 1 Descriptor */
typedef struct CDDesc {
    uint32_t word[2];
} CDDesc;

/* Stream Table Entry (STE) */
typedef struct STE {
    uint32_t word[16];
} STE;

/* Context Descriptor (CD) */
typedef struct CD {
    uint32_t word[16];
} CD;

/* STE fields */

#define STE_VALID(x)   extract32((x)->word[0], 0, 1)

#define STE_CONFIG(x)  extract32((x)->word[0], 1, 3)
#define STE_CFG_S1_ENABLED(config) (config & 0x1)
#define STE_CFG_S2_ENABLED(config) (config & 0x2)
#define STE_CFG_ABORT(config)      (!(config & 0x4))
#define STE_CFG_BYPASS(config)     (config == 0x4)
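
/*
 * STE.Config decoding, as used by the helpers above: any value with bit 2
 * clear aborts the transaction (0b000 in the architecture), 0b100 is
 * bypass, and when bit 2 is set bits 0 and 1 enable stage 1 and stage 2
 * translation respectively.
 */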

#define STE_S1FMT(x)       extract32((x)->word[0], 4 , 2)
#define STE_S1CDMAX(x)     extract32((x)->word[1], 27, 5)
#define STE_S1STALLD(x)    extract32((x)->word[2], 27, 1)
#define STE_EATS(x)        extract32((x)->word[2], 28, 2)
#define STE_STRW(x)        extract32((x)->word[2], 30, 2)
#define STE_S2VMID(x)      extract32((x)->word[4], 0 , 16)
#define STE_S2T0SZ(x)      extract32((x)->word[5], 0 , 6)
#define STE_S2SL0(x)       extract32((x)->word[5], 6 , 2)
#define STE_S2TG(x)        extract32((x)->word[5], 14, 2)
#define STE_S2PS(x)        extract32((x)->word[5], 16, 3)
#define STE_S2AA64(x)      extract32((x)->word[5], 19, 1)
#define STE_S2HD(x)        extract32((x)->word[5], 24, 1)
#define STE_S2HA(x)        extract32((x)->word[5], 25, 1)
#define STE_S2S(x)         extract32((x)->word[5], 26, 1)
#define STE_CTXPTR(x)                                          \
    ({                                                         \
        unsigned long addr;                                    \
        addr = (uint64_t)extract32((x)->word[1], 0, 16) << 32; \
        addr |= (uint64_t)((x)->word[0] & 0xffffffc0);         \
        addr;                                                  \
    })

#define STE_S2TTB(x)                                           \
    ({                                                         \
        unsigned long addr;                                    \
        addr = (uint64_t)extract32((x)->word[7], 0, 16) << 32; \
        addr |= (uint64_t)((x)->word[6] & 0xfffffff0);         \
        addr;                                                  \
    })

static inline int oas2bits(int oas_field)
{
    switch (oas_field) {
    case 0:
        return 32;
    case 1:
        return 36;
    case 2:
        return 40;
    case 3:
        return 42;
    case 4:
        return 44;
    case 5:
        return 48;
    }
    return -1;
}

static inline int pa_range(STE *ste)
{
    int oas_field = MIN(STE_S2PS(ste), SMMU_IDR5_OAS);

    if (!STE_S2AA64(ste)) {
        return 40;
    }

    return oas2bits(oas_field);
}

#define MAX_PA(ste) ((1 << pa_range(ste)) - 1)
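
/*
 * Example (illustrative): an STE with S2AA64 clear always reports a
 * 40-bit range, while an AArch64 STE advertising S2PS >= 4 is clamped by
 * SMMU_IDR5_OAS to oas2bits(4), i.e. 44 bits of output address.
 */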

/* CD fields */

#define CD_VALID(x)   extract32((x)->word[0], 30, 1)
#define CD_ASID(x)    extract32((x)->word[1], 16, 16)
#define CD_TTB(x, sel)                                   \
    ({                                                   \
        uint64_t hi, lo;                                 \
        hi = extract32((x)->word[(sel) * 2 + 3], 0, 19); \
        hi <<= 32;                                       \
        lo = (x)->word[(sel) * 2 + 2] & ~0xfULL;         \
        hi | lo;                                         \
    })

#define CD_TSZ(x, sel)   extract32((x)->word[0], (16 * (sel)) + 0, 6)
#define CD_TG(x, sel)    extract32((x)->word[0], (16 * (sel)) + 6, 2)
#define CD_EPD(x, sel)   extract32((x)->word[0], (16 * (sel)) + 14, 1)
#define CD_ENDI(x)       extract32((x)->word[0], 15, 1)
#define CD_IPS(x)        extract32((x)->word[1], 0 , 3)
#define CD_TBI(x)        extract32((x)->word[1], 6 , 2)
#define CD_HD(x)         extract32((x)->word[1], 10, 1)
#define CD_HA(x)         extract32((x)->word[1], 11, 1)
#define CD_S(x)          extract32((x)->word[1], 12, 1)
#define CD_R(x)          extract32((x)->word[1], 13, 1)
#define CD_A(x)          extract32((x)->word[1], 14, 1)
#define CD_AARCH64(x)    extract32((x)->word[1], 9 , 1)

#define CDM_VALID(x)    ((x)->word[0] & 0x1)

static inline int is_cd_valid(SMMUv3State *s, STE *ste, CD *cd)
{
    return CD_VALID(cd);
}

/**
 * tg2granule - Decodes the CD translation granule size field according
 * to the ttbr in use
 * @bits: TG0/1 fields
 * @ttbr: ttbr index in use
 */
static inline int tg2granule(int bits, int ttbr)
{
    switch (bits) {
    case 0:
        return ttbr ? 0  : 12;
    case 1:
        return ttbr ? 14 : 16;
    case 2:
        return ttbr ? 12 : 14;
    case 3:
        return ttbr ? 16 : 0;
    default:
        return 0;
    }
}
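
/*
 * The TG0 and TG1 encodings differ (matching the VMSAv8-64 TCR fields),
 * hence the ttbr parameter:
 *
 *   bits   TTB0 granule   TTB1 granule
 *   0      4KB  (12)      reserved (0)
 *   1      64KB (16)      16KB (14)
 *   2      16KB (14)      4KB  (12)
 *   3      reserved (0)   64KB (16)
 */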

static inline uint64_t l1std_l2ptr(STEDesc *desc)
{
    uint64_t hi, lo;

    hi = desc->word[1];
    lo = desc->word[0] & ~0x1fULL;
    return hi << 32 | lo;
}

#define L1STD_SPAN(stm)   (extract32((stm)->word[0], 0, 4))
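
/*
 * For a two-level Stream table (SMMU_FEATURE_2LVL_STE), the upper
 * StreamID bits (per STRTAB_BASE_CFG.SPLIT) select a level-1 STEDesc;
 * l1std_l2ptr() then yields the level-2 table address and L1STD_SPAN its
 * size: a span of n covers 2^(n - 1) STEs, 0 marking an invalid
 * descriptor.
 */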

#endif