/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

31 #include "qemu-common.h"
32 #include "host-utils.h"
33 #if !defined(CONFIG_USER_ONLY)
34 #include "hw/loader.h"
#define XTREG(idx, ofs, bi, sz, al, no, flags, cp, typ, grp, name, \
        a1, a2, a3, a4, a5, a6) \
    { .targno = (no), .type = (typ), .group = (grp) },

static void reset_mmu(CPUState *env);

void cpu_reset(CPUXtensaState *env)
{
    env->exception_taken = 0;
    env->pc = env->config->exception_vector[EXC_RESET];
    env->sregs[LITBASE] &= ~1;
    env->sregs[PS] = xtensa_option_enabled(env->config,
            XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
    env->sregs[VECBASE] = env->config->vecbase;

    env->pending_irq_level = 0;
    reset_mmu(env);
}

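/*
 * Built-in core configurations; cpu_xtensa_init() matches the requested
 * cpu_model against the .name fields of this table.
 */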
static const XtensaConfig core_config[] = {
    {
        .name = "sample-xtensa-core",
            (XTENSA_OPTION_BIT(XTENSA_OPTION_HW_ALIGNMENT) |
             XTENSA_OPTION_BIT(XTENSA_OPTION_MMU)),
#include "gdb-config-sample-xtensa-core.c"
        .vecbase = 0x5fff8400,
        .exception_vector = {
            [EXC_RESET] = 0x5fff8000,
            [EXC_WINDOW_OVERFLOW4] = 0x5fff8400,
            [EXC_WINDOW_UNDERFLOW4] = 0x5fff8440,
            [EXC_WINDOW_OVERFLOW8] = 0x5fff8480,
            [EXC_WINDOW_UNDERFLOW8] = 0x5fff84c0,
            [EXC_WINDOW_OVERFLOW12] = 0x5fff8500,
            [EXC_WINDOW_UNDERFLOW12] = 0x5fff8540,
            [EXC_KERNEL] = 0x5fff861c,
            [EXC_USER] = 0x5fff863c,
            [EXC_DOUBLE] = 0x5fff865c,
        },
        .inttype = INTTYPE_TIMER,
        .clock_freq_khz = 912000,
    },
};

CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
{
    static int tcg_inited;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(core_config); ++i) {
        if (strcmp(core_config[i].name, cpu_model) == 0) {
            config = core_config + i;
            break;
        }
    }

    if (config == NULL) {
        return NULL;
    }

    env = g_malloc0(sizeof(*env));
    env->config = config;

    if (!tcg_inited) {
        tcg_inited = 1;
        xtensa_translate_init();
    }

    xtensa_irq_init(env);
    return env;
}

void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    int i;

    cpu_fprintf(f, "Available CPUs:\n");
    for (i = 0; i < ARRAY_SIZE(core_config); ++i) {
        cpu_fprintf(f, "  %s\n", core_config[i].name);
    }
}

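/*
 * Debug translation helper: try the address first as a data access
 * (is_write = 0), then as an instruction fetch (is_write = 2).
 */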
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(env, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

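/*
 * With the relocatable vector option the configured vector addresses are
 * rebased from config->vecbase onto the current VECBASE special register.
 */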
static uint32_t relocated_vector(CPUState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

void do_interrupt(CPUState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}

static void reset_tlb_mmu_all_ways(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

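/*
 * Ways 5 and 6 get fixed mappings when the core has no variable way 5/6
 * (varway56); otherwise way 6 is reset to identity-map the eight 512 MB
 * regions.
 */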
static void reset_tlb_mmu_ways56(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
        static const xtensa_tlb_entry way6[] = {
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;

        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 2;
        }
    }
}

static void reset_tlb_region_way0(CPUState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

static void reset_mmu(CPUState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

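/*
 * RASID holds one ASID per ring, one byte per ring (reset value
 * 0x04030201); get_ring() returns the ring owning the given TLB entry ASID.
 */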
static unsigned get_ring(const CPUState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Lookup xtensa TLB for the given virtual address.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;
    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
    if ((attr < 6 && attr != 3) || attr == 14) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    if (attr > 0 && attr < 6) {
        access |= PAGE_EXEC;
    }
    return access;
}

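/*
 * is_write follows the usual QEMU access-type convention in this file:
 * 0 for loads, 1 for stores, 2 for instruction fetches.
 */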
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;
    case 1:
        return access & PAGE_WRITE;
    case 2:
        return access & PAGE_EXEC;
    default:
        return 0;
    }
}

static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring);

static int get_physical_addr_mmu(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

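/*
 * TLB autorefill: the page table entry address is formed from PTEVADDR and
 * the top bits of the faulting address; on a successful pagewalk the PTE is
 * read with ldl_phys() and installed into one of the four autorefill ways,
 * chosen round-robin via autorefill_idx.
 */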
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
            &paddr, &page_size, &access);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        uint32_t vpn;
        uint32_t pte = ldl_phys(paddr);

        *ring = (pte >> 4) & 0x3;
        *wi = (++env->autorefill_idx) & 0x3;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                __func__, vaddr, vpn, pte);
    }
    return ret;
}

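/*
 * Region protection/translation: address bits 31..29 select one of eight
 * 512 MB regions in way 0; the entry attribute gives the allowed access and
 * REGION_PAGE_MASK the mapping granularity.
 */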
static int get_physical_addr_region(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}