/*
 *  Alpha emulation cpu helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "fpu/softfloat.h"
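
/* The architectural FPCR value is kept split across several fields of
   CPUAlphaState (fpcr_exc_status, fpcr_exc_mask, fpcr_dyn_round, ...)
   so that the softfloat code can consume them directly.  The two
   helpers below convert between that split representation and the
   64-bit register image seen by the guest.  */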
uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
{
    uint64_t r = 0;
    uint8_t t;

    t = env->fpcr_exc_status;
    if (t) {
        r = FPCR_SUM;
        if (t & float_flag_invalid) {
            r |= FPCR_INV;
        }
        if (t & float_flag_divbyzero) {
            r |= FPCR_DZE;
        }
        if (t & float_flag_overflow) {
            r |= FPCR_OVF;
        }
        if (t & float_flag_underflow) {
            r |= FPCR_UNF;
        }
        if (t & float_flag_inexact) {
            r |= FPCR_INE;
        }
    }

    t = env->fpcr_exc_mask;
    if (t & float_flag_invalid) {
        r |= FPCR_INVD;
    }
    if (t & float_flag_divbyzero) {
        r |= FPCR_DZED;
    }
    if (t & float_flag_overflow) {
        r |= FPCR_OVFD;
    }
    if (t & float_flag_underflow) {
        r |= FPCR_UNFD;
    }
    if (t & float_flag_inexact) {
        r |= FPCR_INED;
    }

    switch (env->fpcr_dyn_round) {
    case float_round_nearest_even:
        r |= FPCR_DYN_NORMAL;
        break;
    case float_round_down:
        r |= FPCR_DYN_MINUS;
        break;
    case float_round_up:
        r |= FPCR_DYN_PLUS;
        break;
    case float_round_to_zero:
        r |= FPCR_DYN_CHOPPED;
        break;
    }

    if (env->fp_status.flush_inputs_to_zero) {
        r |= FPCR_DNZ;
    }
    if (env->fpcr_dnod) {
        r |= FPCR_DNOD;
    }
    if (env->fpcr_undz) {
        r |= FPCR_UNDZ;
    }

    return r;
}
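
/* Denormal results are flushed to zero only when both DNOD and UNDZ are
   set (see the fpcr_flush_to_zero computation below), while DNZ alone
   controls flushing of denormal inputs.  */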
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
{
    uint8_t t;

    t = 0;
    if (val & FPCR_INV) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZE) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVF) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNF) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INE) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_status = t;

    t = 0;
    if (val & FPCR_INVD) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZED) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVFD) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNFD) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INED) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_mask = t;

    switch (val & FPCR_DYN_MASK) {
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_NORMAL:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_dnod = (val & FPCR_DNOD) != 0;
    env->fpcr_undz = (val & FPCR_UNDZ) != 0;
    env->fpcr_flush_to_zero = env->fpcr_dnod & env->fpcr_undz;
    env->fp_status.flush_inputs_to_zero = (val & FPCR_DNZ) != 0;
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
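
/* MMU fault handling.  For user-only emulation there is no page table to
   walk: every fault is reported back to the main loop as EXCP_MMFAULT with
   the faulting address in trap_arg0.  */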
#if defined(CONFIG_USER_ONLY)
int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong address,
                               int rw, int mmu_idx)
{
    env->exception_index = EXCP_MMFAULT;
    env->trap_arg0 = address;
    return 1;
}
#else
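
/* PALcode has a private copy of eight integer registers.  Exchange the
   architecturally visible ir[8-14] and ir[25] with the shadow[] bank when
   entering or leaving PALmode.  */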
void swap_shadow_regs(CPUAlphaState *env)
{
    uint64_t i0, i1, i2, i3, i4, i5, i6, i7;

    i0 = env->ir[8];
    i1 = env->ir[9];
    i2 = env->ir[10];
    i3 = env->ir[11];
    i4 = env->ir[12];
    i5 = env->ir[13];
    i6 = env->ir[14];
    i7 = env->ir[25];

    env->ir[8]  = env->shadow[0];
    env->ir[9]  = env->shadow[1];
    env->ir[10] = env->shadow[2];
    env->ir[11] = env->shadow[3];
    env->ir[12] = env->shadow[4];
    env->ir[13] = env->shadow[5];
    env->ir[14] = env->shadow[6];
    env->ir[25] = env->shadow[7];

    env->shadow[0] = i0;
    env->shadow[1] = i1;
    env->shadow[2] = i2;
    env->shadow[3] = i3;
    env->shadow[4] = i4;
    env->shadow[5] = i5;
    env->shadow[6] = i6;
    env->shadow[7] = i7;
}
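
/* Three-level page table walk, interpreted as the Unix PALcode does it.
   A virtual address decomposes as

       | L1 index (10 bits) | L2 index (10 bits) | L3 index (10 bits) | page offset |

   where each index selects one quadword PTE and the high 32 bits of a
   valid PTE hold the page frame number of the next level (or, for the
   L3 entry, of the data page itself).  */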
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
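
/* Debug translation for the monitor: walk the tables with no protection
   requirement, returning -1 if the address cannot be translated.  */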
hwaddr cpu_get_phys_page_debug(CPUAlphaState *env, target_ulong addr)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong addr, int rw,
                               int mmu_idx)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        /* Raise the MMU fault; the entMM failure code goes to trap_arg1
           and the access type (-1 for an instruction fetch) to trap_arg2.  */
        env->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (rw == 2 ? -1 : rw);
        return 1;
    }

    tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */
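
/* Deliver the pending exception or interrupt: pick the PALcode entry
   offset, record the old PC in exc_addr, and continue in PALmode at
   palbr plus that offset.  */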
void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = env->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        }
        qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
    }

    env->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    switch (i) {
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(env, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | env->pal_mode;

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    if (!env->pal_mode) {
        env->pal_mode = 1;
        swap_shadow_regs(env);
    }
#endif /* !USER_ONLY */
}
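
/* Dump the CPU state for the monitor, labelling the integer registers
   with their Linux ABI names.  */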
void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                          int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                env->pc, env->ps);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
                    linux_reg_names[i], env->ir[i]);
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }

    cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                env->lock_addr, env->lock_value);

    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
                    *((uint64_t *)(&env->fir[i])));
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }

    cpu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(env);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(env, retaddr);
    }
    cpu_loop_exit(env);
}

void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}