target/alpha/helper.c
/*
 *  Alpha emulation cpu helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
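
/*
 * The CONVERT_BIT macro below moves a single-bit flag from the position
 * given by the SRC mask to the position given by the DST mask: when SRC
 * is the higher bit the value is shifted down by division, otherwise it
 * is shifted up by multiplication.  For example,
 * CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE) yields the INED (inexact
 * disable) bit at the position of the INE status bit.
 */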
#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    env->fpcr = fpcr;
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    switch (fpcr & FPCR_DYN_MASK) {
    case FPCR_DYN_NORMAL:
    default:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    if (env->swcr & SWCR_MAP_DMZ) {
        env->fp_status.flush_inputs_to_zero = 1;
    }
    if (env->swcr & SWCR_MAP_UMZ) {
        env->fp_status.flush_to_zero = 1;
    }
    env->fpcr_exc_enable &= ~(alpha_ieee_swcr_to_fpcr(env->swcr) >> 32);
#endif
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
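
/*
 * In PALmode the integer registers r8-r14 and r25 are replaced by the
 * PALshadow registers, so accesses to those register numbers are
 * redirected to env->shadow[] by the helper below.
 */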
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}

#if defined(CONFIG_USER_ONLY)
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    cpu->env.trap_arg0 = address;
    cpu_loop_exit_restore(cs, retaddr);
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;
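        /*
         * For example, the KSEG virtual address 0xfffffd0000000000
         * (offset 1 << 40 into the superpage) ends up at physical
         * address 0x80000000000, i.e. bit 40 relocated to bit 43.
         */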

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */
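
    /*
     * Each level indexes 1024 eight-byte PTEs, so with Alpha's 8 KB pages
     * (TARGET_PAGE_BITS == 13) the L1, L2 and L3 indices come from virtual
     * address bits 42-33, 32-23 and 22-13 respectively.
     */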

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index * 8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index * 8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index * 8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_INST_FETCH ? -1 : access_type);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
#endif /* USER_ONLY */

void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
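        /*
         * For example, the unprivileged CALL_PAL 0x83 (callsys in the
         * OSF/1 PALcode) dispatches to offset 0x2000 + 3 * 64 = 0x20c0,
         * while the privileged CALL_PAL 0x02 dispatches to
         * 0x1000 + 2 * 64 = 0x1080.
         */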
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
#endif /* !USER_ONLY */
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
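    /*
     * Because each case falls through to the next, a later (higher
     * priority) source overwrites an earlier one: e.g. at IPL 0 with both
     * a device interrupt and a machine check pending, idx ends up as
     * EXCP_MCHK.
     */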
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    static const char linux_reg_names[31][4] = {
        "v0",  "t0",  "t1", "t2",  "t3", "t4", "t5", "t6",
        "t7",  "s0",  "s1", "s2",  "s3", "s4", "s5", "fp",
        "a0",  "a1",  "a2", "a3",  "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
        qemu_fprintf(f, "fpcr    %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
    }
    qemu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr, true);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}

void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}