/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "cpu.h"
#include "qemu-common.h"
#include "host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static void reset_mmu(CPUState *env);
void cpu_reset(CPUXtensaState *env)
{
    env->exception_taken = 0;
    env->pc = env->config->exception_vector[EXC_RESET];
    env->sregs[LITBASE] &= ~1;
    env->sregs[PS] = xtensa_option_enabled(env->config,
            XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
    env->sregs[VECBASE] = env->config->vecbase;

    env->pending_irq_level = 0;
    reset_mmu(env);
}
static struct XtensaConfigList *xtensa_cores;

void xtensa_register_core(XtensaConfigList *node)
{
    node->next = xtensa_cores;
    xtensa_cores = node;
}

CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
{
    static int tcg_inited;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    XtensaConfigList *core = xtensa_cores;

    /* Find the registered core configuration with the requested name */
    for (; core; core = core->next) {
        if (strcmp(core->config->name, cpu_model) == 0) {
            config = core->config;
            break;
        }
    }

    if (config == NULL) {
        return NULL;
    }

    env = g_malloc0(sizeof(*env));
    env->config = config;
    cpu_exec_init(env);

    if (!tcg_inited) {
        tcg_inited = 1;
        xtensa_translate_init();
    }

    qemu_init_vcpu(env);
    return env;
}
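/*
 * Usage sketch, not part of the original source: a machine model creates a
 * CPU by name once the core has been registered; the core name "dc232b"
 * below is only an example and is assumed to have been registered via
 * xtensa_register_core():
 *
 *     CPUXtensaState *env = cpu_xtensa_init("dc232b");
 *     if (env == NULL) {
 *         // unknown CPU model name
 *     }
 */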
void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    /* Try the address first as a data access, then as an instruction fetch */
    if (xtensa_get_physical_addr(env, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(env, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}
static uint32_t relocated_vector(CPUState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}
/*!
 * Handle pending IRQ.
 * For a high priority interrupt jump to the corresponding interrupt vector.
 * For a level-1 interrupt convert it to either a user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            /* High priority interrupt: save PC/PS and jump to its vector */
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            /* Level-1 interrupt: convert it to a regular exception */
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}
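/*
 * Illustrative sketch (not part of the original source): the effect of
 * handle_interrupt() on a pending level-3 interrupt taken at interrupt
 * level 0 can be pictured as the following equivalent assignments (the
 * concrete level 3 is only an example):
 *
 *     env->sregs[EPC1 + 2] = env->pc;           // EPC3 <- return address
 *     env->sregs[EPS2 + 1] = env->sregs[PS];    // EPS3 <- saved PS
 *     env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | 3 | PS_EXCM;
 *     env->pc = relocated_vector(env, env->config->interrupt_vector[3]);
 *
 * A level-1 interrupt instead goes through EXCCAUSE/EPC1 and is dispatched
 * as EXC_USER, EXC_KERNEL or EXC_DOUBLE by do_interrupt() below.
 */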
void do_interrupt(CPUState *env)
{
    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}
static void reset_tlb_mmu_all_ways(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}
static void reset_tlb_mmu_ways56(CPUState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Fixed-size ways 5/6: install static mappings (the entry values
         * below follow the standard Xtensa MMU static way layout) */
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 2;
        }
    }
}
static void reset_tlb_region_way0(CPUState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}
static void reset_mmu(CPUState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}
static unsigned get_ring(const CPUState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    /* No RASID field matches: the ASID is not currently accessible */
    return 0xff;
}
/*!
 * Lookup xtensa TLB for the given virtual address.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
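/*
 * Usage sketch, not part of the original source: a caller that only needs
 * the matching way/entry indices for a data access could use the lookup
 * like this (vaddr is a hypothetical guest virtual address):
 *
 *     uint32_t wi, ei;
 *     uint8_t ring;
 *     int cause = xtensa_tlb_lookup(env, vaddr, true, &wi, &ei, &ring);
 *     if (cause == 0) {
 *         // way wi, entry ei describe the mapping; ring is the access ring
 *     } else {
 *         // cause is a TLB miss or multi-hit exception cause code
 *     }
 *
 * get_physical_addr_mmu() below is the in-tree caller of this helper.
 */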
/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    return access;
}
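/*
 * Worked example (illustrative, not part of the original source): a page
 * table entry with ATTR = 13 yields PAGE_READ | PAGE_WRITE, i.e. data
 * access only with no execute permission, which is what is_access_granted()
 * later checks against the access type:
 *
 *     unsigned access = mmu_attr_to_access(13);
 *     assert(access == (PAGE_READ | PAGE_WRITE));
 */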
/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    unsigned access = 0;
    if ((attr < 6 && attr != 3) || attr == 14) {
        access |= PAGE_READ | PAGE_WRITE;
    }
    if (attr > 0 && attr < 6) {
        access |= PAGE_EXEC;
    }
    return access;
}
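/*
 * Worked example (illustrative, not part of the original source): with the
 * conditions above, region attribute 2 (the value used for the default
 * region entries in reset_tlb_region_way0() above) satisfies both tests,
 * attribute 14 only the first, and attribute 3 only the second:
 *
 *     assert(region_attr_to_access(2) ==
 *            (PAGE_READ | PAGE_WRITE | PAGE_EXEC));
 *     assert(region_attr_to_access(14) == (PAGE_READ | PAGE_WRITE));
 *     assert(region_attr_to_access(3) == PAGE_EXEC);
 */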
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring);
static int get_physical_addr_mmu(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    /* On a TLB miss (except for the page table region itself) try to
     * refill the autorefill way from the page table before giving up */
    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            (mmu_idx != 0 || ((vaddr ^ env->sregs[PTEVADDR]) & 0xffc00000)) &&
            autorefill_mmu(env, vaddr, dtlb, &wi, &ei, &ring) == 0) {
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
static int autorefill_mmu(CPUState *env, uint32_t vaddr, bool dtlb,
        uint32_t *wi, uint32_t *ei, uint8_t *ring)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, pt_vaddr, 0, 0,
            &paddr, &page_size, &access);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        uint32_t vpn;
        uint32_t pte = ldl_phys(paddr);

        *ring = (pte >> 4) & 0x3;
        *wi = (++env->autorefill_idx) & 0x3;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, *wi, ei);
        xtensa_tlb_set_entry(env, dtlb, *wi, *ei, vpn, pte);
        qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                __func__, vaddr, vpn, pte);
    }
    return ret;
}
static int get_physical_addr_region(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}
/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        /* Neither MMU nor region protection/translation: identity mapping */
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }
}