2 * Alpha emulation cpu helpers for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
23 #include "exec/exec-all.h"
24 #include "fpu/softfloat-types.h"
25 #include "exec/helper-proto.h"
26 #include "qemu/qemu-print.h"
/*
 * Move the bit selected by mask SRC in X to the position of mask DST,
 * scaling by the ratio of the two (power-of-two) masks.  Used to remap
 * FPCR disable bits onto the corresponding status-bit positions.
 * Every argument is parenthesized so that compound expressions passed
 * by callers cannot interact with the macro's operators.
 */
#define CONVERT_BIT(X, SRC, DST) \
    ((SRC) > (DST)                              \
     ? ((X) / ((SRC) / (DST))) & (DST)          \
     : ((X) & (SRC)) * ((DST) / (SRC)))
32 uint64_t cpu_alpha_load_fpcr(CPUAlphaState
*env
)
34 return (uint64_t)env
->fpcr
<< 32;
/*
 * Install a new FPCR value and pre-compute the derived state consulted
 * by the FP emulation on every operation: the exception-enable mask,
 * the dynamic rounding mode, and the flush-to-zero flags.
 */
void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    /* Translate the FPCR dynamic-rounding field to softfloat modes. */
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };

    /* All defined FPCR bits live in the high 32 bits of the register. */
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user. */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

    /* Gather the per-exception *disable* bits at status-bit positions. */
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    /* A set disable bit means the trap is NOT enabled, hence the ~t. */
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

    /* Underflow results flush to zero when both UNFD and UNDZ are set. */
    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}
88 uint64_t helper_load_fpcr(CPUAlphaState
*env
)
90 return cpu_alpha_load_fpcr(env
);
93 void helper_store_fpcr(CPUAlphaState
*env
, uint64_t val
)
95 cpu_alpha_store_fpcr(env
, val
);
/*
 * Return the storage slot backing integer register REG.
 * In PALmode (system emulation only), registers 8-14 and 25 are
 * redirected to the shadow register bank: 8-14 map to shadow[0..6]
 * and 25 maps to shadow[7].  All other cases use the normal ir[] file.
 */
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}
112 uint64_t cpu_alpha_load_gr(CPUAlphaState
*env
, unsigned reg
)
114 return *cpu_alpha_addr_gr(env
, reg
);
117 void cpu_alpha_store_gr(CPUAlphaState
*env
, unsigned reg
, uint64_t val
)
119 *cpu_alpha_addr_gr(env
, reg
) = val
;
122 #if defined(CONFIG_USER_ONLY)
/*
 * linux-user SIGSEGV hook: fill in the trap arguments exactly as
 * PALcode would hand them to the kernel's memory-management entry.
 * trap_arg0 = faulting address, trap_arg1 = MM_CSR fault code,
 * trap_arg2 = access cause (0 load, 1 store, -1 ifetch).
 */
void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
                              MMUAccessType access_type,
                              bool maperr, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong mmcsr, cause;

    /* Assuming !maperr, infer the missing protection. */
    switch (access_type) {
    case MMU_DATA_LOAD:
        mmcsr = MM_K_FOR;
        cause = 0;
        break;
    case MMU_DATA_STORE:
        mmcsr = MM_K_FOW;
        cause = 1;
        break;
    case MMU_INST_FETCH:
        mmcsr = MM_K_FOE;
        cause = -1;
        break;
    default:
        g_assert_not_reached();
    }

    if (maperr) {
        if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
            /* Userspace address, therefore page not mapped. */
            mmcsr = MM_K_TNV;
        } else {
            /* Kernel or invalid address. */
            mmcsr = MM_K_ACV;
        }
    }

    /* Record the arguments that PALcode would give to the kernel. */
    cpu->env.trap_arg0 = address;
    cpu->env.trap_arg1 = mmcsr;
    cpu->env.trap_arg2 = cause;
}
/* Returns the OSF/1 entMM failure indication, or -1 on success.
   Walks the three-level page table exactly as Unix PALcode does,
   after first handling physical-mode accesses and the KSEG superpage.
   On return, *pphys and *pprot hold the translation result. */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit. */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage. */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active. */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses. */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled. */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does. */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */

    /* L1 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    /* The PFN lives in the high 32 bits of the PTE. */
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read. */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  The PTE_K*E bits are shifted by the
       mmu index to select the per-mode enable bits. */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations. */
    prot &= ~(L3pte >> 1);
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
        goto exit;
    }

    ret = -1;
 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
/*
 * Debug (gdbstub/monitor) address translation: resolve ADDR with no
 * protection requirement in the kernel MMU context.  Returns the
 * physical address, or -1 if the page is not mapped.
 */
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}
/*
 * TCG TLB-fill hook.  Translate ADDR; on success install the mapping
 * and return true.  On failure: if this is a probe, return false;
 * otherwise record the PALcode trap arguments and raise EXCP_MMFAULT
 * (does not return in that case).
 */
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    /* MMU_DATA_LOAD/STORE/INST_FETCH are 0/1/2, so 1 << access_type
       yields the matching PAGE_READ/WRITE/EXEC requirement. */
    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
                          access_type == MMU_DATA_STORE ? 1ull :
                          /* access_type == MMU_INST_FETCH */ -1ull);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
/*
 * Deliver the pending exception: optionally log it, map the exception
 * index to a PALcode entry-point offset, save the faulting PC in
 * exc_addr (low bit = prior PALmode), and redirect execution to
 * palbr + offset in PALmode.
 */
void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

    /* Map the exception to its PALcode entry-point offset. */
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job. */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode. */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point. */
    env->pc = env->palbr + i;

    /* Switch to PALmode. */
    env->flags |= ENV_FLAG_PAL_MODE;
}
/*
 * Check for a deliverable hardware interrupt.  The current IPL (from
 * the PS field in env->flags) masks lower-priority sources; the switch
 * falls through so the highest-priority unmasked request wins.
 * Returns true if an interrupt was taken.
 */
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode. */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL. */
    /* ??? This hard-codes the OSF/1 interrupt levels. */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
480 #endif /* !CONFIG_USER_ONLY */
/*
 * Dump CPU state for the monitor/logging: PC, PS, the 31 integer
 * registers (named per the Linux/OSF convention), the lock registers,
 * and — when CPU_DUMP_FPU is requested — the FP registers and FPCR.
 */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    static const char linux_reg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC " TARGET_FMT_lx " PS %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    /* Three registers per line, using cpu_alpha_load_gr so PALmode
       shadow registers are reflected correctly. */
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
        qemu_fprintf(f, "fpcr %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
    }
    qemu_fprintf(f, "\n");
}
/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.
   Records the exception and longjmps out of the generated code. */
G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.
   When RETADDR is non-zero, the guest state is first restored from the
   host return address before raising the exception. */
G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                             int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC. */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}
/* Raise an arithmetic exception: EXC is the exception summary and
   MASK the register write mask, both handed to PALcode via the trap
   argument registers.  Does not return. */
G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                           int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}