target-alpha/helper.c
/*
 *  Alpha emulation cpu helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"

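/* Assemble the guest-visible FPCR from the pieces QEMU tracks
   separately: the accrued softfloat exception status, the trap
   disable mask, the dynamic rounding mode, and the denormal
   handling bits.  */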
uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
{
    uint64_t r = 0;
    uint8_t t;

    t = env->fpcr_exc_status;
    if (t) {
        r = FPCR_SUM;
        if (t & float_flag_invalid) {
            r |= FPCR_INV;
        }
        if (t & float_flag_divbyzero) {
            r |= FPCR_DZE;
        }
        if (t & float_flag_overflow) {
            r |= FPCR_OVF;
        }
        if (t & float_flag_underflow) {
            r |= FPCR_UNF;
        }
        if (t & float_flag_inexact) {
            r |= FPCR_INE;
        }
    }

    t = env->fpcr_exc_mask;
    if (t & float_flag_invalid) {
        r |= FPCR_INVD;
    }
    if (t & float_flag_divbyzero) {
        r |= FPCR_DZED;
    }
    if (t & float_flag_overflow) {
        r |= FPCR_OVFD;
    }
    if (t & float_flag_underflow) {
        r |= FPCR_UNFD;
    }
    if (t & float_flag_inexact) {
        r |= FPCR_INED;
    }

    switch (env->fpcr_dyn_round) {
    case float_round_nearest_even:
        r |= FPCR_DYN_NORMAL;
        break;
    case float_round_down:
        r |= FPCR_DYN_MINUS;
        break;
    case float_round_up:
        r |= FPCR_DYN_PLUS;
        break;
    case float_round_to_zero:
        r |= FPCR_DYN_CHOPPED;
        break;
    }

    if (env->fp_status.flush_inputs_to_zero) {
        r |= FPCR_DNZ;
    }
    if (env->fpcr_dnod) {
        r |= FPCR_DNOD;
    }
    if (env->fpcr_undz) {
        r |= FPCR_UNDZ;
    }

    return r;
}

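/* Decompose a value written to the FPCR back into the separate fields
   read above.  Note that writing the status bits replaces the current
   softfloat exception status rather than accumulating into it.  */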
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
{
    uint8_t t;

    t = 0;
    if (val & FPCR_INV) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZE) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVF) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNF) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INE) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_status = t;

    t = 0;
    if (val & FPCR_INVD) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZED) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVFD) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNFD) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INED) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_mask = t;

    switch (val & FPCR_DYN_MASK) {
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_NORMAL:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_dnod = (val & FPCR_DNOD) != 0;
    env->fpcr_undz = (val & FPCR_UNDZ) != 0;
    env->fpcr_flush_to_zero = env->fpcr_dnod & env->fpcr_undz;
    env->fp_status.flush_inputs_to_zero = (val & FPCR_DNZ) != 0;
}

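/* TCG helper entry points: translated code reads and writes the FPCR
   through these thin wrappers.  */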
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}

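/* For user-mode emulation every memory fault is simply flagged as
   EXCP_MMFAULT for the main loop to turn into a guest signal; the
   page-table walk below is only needed (and only built) for system
   emulation.  */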
#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                               int rw, int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    cpu->env.trap_arg0 = address;
    return 1;
}
#else

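/* PALmode provides shadow copies of r8-r14 and r25.  Entering or
   leaving PALmode swaps the architectural registers with the saved
   shadow set.  */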
void swap_shadow_regs(CPUAlphaState *env)
{
    uint64_t i0, i1, i2, i3, i4, i5, i6, i7;

    i0 = env->ir[8];
    i1 = env->ir[9];
    i2 = env->ir[10];
    i3 = env->ir[11];
    i4 = env->ir[12];
    i5 = env->ir[13];
    i6 = env->ir[14];
    i7 = env->ir[25];

    env->ir[8]  = env->shadow[0];
    env->ir[9]  = env->shadow[1];
    env->ir[10] = env->shadow[2];
    env->ir[11] = env->shadow[3];
    env->ir[12] = env->shadow[4];
    env->ir[13] = env->shadow[5];
    env->ir[14] = env->shadow[6];
    env->ir[25] = env->shadow[7];

    env->shadow[0] = i0;
    env->shadow[1] = i1;
    env->shadow[2] = i2;
    env->shadow[3] = i3;
    env->shadow[4] = i4;
    env->shadow[5] = i5;
    env->shadow[6] = i6;
    env->shadow[7] = i7;
}

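/* The translation below interprets the three-level OSF/1 page table
   rooted at PTBR, exactly as PALcode would: each level is indexed by
   10 bits of the virtual address (above the TARGET_PAGE_BITS offset),
   and each valid PTE carries the next level's page frame number in
   its high 32 bits.  */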
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

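/* Debug translation used for gdb and monitor accesses: walk the tables
   with no particular access rights required, and report an
   untranslatable address as -1.  */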
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

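/* Softmmu TLB-fill entry point.  On success the translation is
   installed with tlb_set_page and 0 is returned; on failure the OSF/1
   entMM arguments are staged in trap_arg0..2, EXCP_MMFAULT is flagged,
   and 1 is returned so the caller can deliver the fault.  */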
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
                               int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (rw == 2 ? -1 : rw);
        return 1;
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */

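/* Deliver an exception or interrupt: map the exception index to an
   offset in the PALcode image, save the faulting PC (with the PALmode
   flag in bit 0) in EXC_ADDR, and resume execution in PALmode at
   palbr plus that offset.  */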
void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        case EXCP_STL_C:
            name = "stl_c";
            break;
        case EXCP_STQ_C:
            name = "stq_c";
            break;
        }
        qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | env->pal_mode;

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    if (!env->pal_mode) {
        env->pal_mode = 1;
        swap_shadow_regs(env);
    }
#endif /* !USER_ONLY */
}

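/* Check for a pending hardware interrupt.  Device, timer, SMP and
   machine-check interrupts are accepted in increasing priority order,
   subject to the IPL field of the processor status; nothing is taken
   while in PALmode.  */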
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->pal_mode) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch (env->ps & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

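/* Register dump used by the monitor's "info registers" command and the
   CPU logging paths.  The integer register names follow the software
   (Linux/OSF) calling convention.  */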
void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                          int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                env->pc, env->ps);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
                    linux_reg_names[i], env->ir[i]);
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }

    cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                env->lock_addr, env->lock_value);

    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
                    *((uint64_t *)(&env->fir[i])));
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }
    cpu_loop_exit(cs);
}

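/* Raise an arithmetic trap: trap_arg0 receives the exception summary
   and trap_arg1 the register write mask, matching the arguments the
   OSF/1 entArith handler expects.  */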
void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}