target-alpha: Move fpcr helpers from op_helper.c to helper.c.
[qemu/wangdongxu.git] / target-alpha / helper.c
blob 3333bfa1b932a8df313a2c02705242277c6cdaf6
/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "softfloat.h"
#include "helper.h"
uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
{
    uint64_t r = 0;
    uint8_t t;

    t = env->fpcr_exc_status;
    if (t) {
        r = FPCR_SUM;
        if (t & float_flag_invalid) {
            r |= FPCR_INV;
        }
        if (t & float_flag_divbyzero) {
            r |= FPCR_DZE;
        }
        if (t & float_flag_overflow) {
            r |= FPCR_OVF;
        }
        if (t & float_flag_underflow) {
            r |= FPCR_UNF;
        }
        if (t & float_flag_inexact) {
            r |= FPCR_INE;
        }
    }

    t = env->fpcr_exc_mask;
    if (t & float_flag_invalid) {
        r |= FPCR_INVD;
    }
    if (t & float_flag_divbyzero) {
        r |= FPCR_DZED;
    }
    if (t & float_flag_overflow) {
        r |= FPCR_OVFD;
    }
    if (t & float_flag_underflow) {
        r |= FPCR_UNFD;
    }
    if (t & float_flag_inexact) {
        r |= FPCR_INED;
    }

    switch (env->fpcr_dyn_round) {
    case float_round_nearest_even:
        r |= FPCR_DYN_NORMAL;
        break;
    case float_round_down:
        r |= FPCR_DYN_MINUS;
        break;
    case float_round_up:
        r |= FPCR_DYN_PLUS;
        break;
    case float_round_to_zero:
        r |= FPCR_DYN_CHOPPED;
        break;
    }

    if (env->fpcr_dnz) {
        r |= FPCR_DNZ;
    }
    if (env->fpcr_dnod) {
        r |= FPCR_DNOD;
    }
    if (env->fpcr_undz) {
        r |= FPCR_UNDZ;
    }

    return r;
}
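/* Worked example for cpu_alpha_load_fpcr (illustrative only): if
   fpcr_exc_status holds float_flag_inexact | float_flag_underflow and
   fpcr_dyn_round is float_round_nearest_even, the value returned has
   FPCR_SUM | FPCR_INE | FPCR_UNF | FPCR_DYN_NORMAL set, plus whatever
   disable and denormal bits apply.  */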
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
{
    uint8_t t;

    t = 0;
    if (val & FPCR_INV) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZE) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVF) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNF) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INE) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_status = t;

    t = 0;
    if (val & FPCR_INVD) {
        t |= float_flag_invalid;
    }
    if (val & FPCR_DZED) {
        t |= float_flag_divbyzero;
    }
    if (val & FPCR_OVFD) {
        t |= float_flag_overflow;
    }
    if (val & FPCR_UNFD) {
        t |= float_flag_underflow;
    }
    if (val & FPCR_INED) {
        t |= float_flag_inexact;
    }
    env->fpcr_exc_mask = t;

    switch (val & FPCR_DYN_MASK) {
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_NORMAL:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_flush_to_zero
        = (val & (FPCR_UNDZ|FPCR_UNFD)) == (FPCR_UNDZ|FPCR_UNFD);
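    /* Note: flush-to-zero is enabled only when both UNDZ and UNFD are set,
       as tested above; this presumably feeds softfloat's flush_to_zero
       behaviour elsewhere, while UNDZ alone only records the bit below.  */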
    env->fpcr_dnz = (val & FPCR_DNZ) != 0;
    env->fpcr_dnod = (val & FPCR_DNOD) != 0;
    env->fpcr_undz = (val & FPCR_UNDZ) != 0;
}
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
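/* These thin wrappers are presumably the TCG helper entry points declared
   in helper.h via the DEF_HELPER macros and invoked from translated code
   for the MF_FPCR/MT_FPCR instructions, while the cpu_alpha_* functions
   above remain callable from other C code.  */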
#if defined(CONFIG_USER_ONLY)
int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong address,
                               int rw, int mmu_idx)
{
    env->exception_index = EXCP_MMFAULT;
    env->trap_arg0 = address;
    return 1;
}
#else
void swap_shadow_regs(CPUAlphaState *env)
{
    uint64_t i0, i1, i2, i3, i4, i5, i6, i7;

    i0 = env->ir[8];
    i1 = env->ir[9];
    i2 = env->ir[10];
    i3 = env->ir[11];
    i4 = env->ir[12];
    i5 = env->ir[13];
    i6 = env->ir[14];
    i7 = env->ir[25];

    env->ir[8]  = env->shadow[0];
    env->ir[9]  = env->shadow[1];
    env->ir[10] = env->shadow[2];
    env->ir[11] = env->shadow[3];
    env->ir[12] = env->shadow[4];
    env->ir[13] = env->shadow[5];
    env->ir[14] = env->shadow[6];
    env->ir[25] = env->shadow[7];

    env->shadow[0] = i0;
    env->shadow[1] = i1;
    env->shadow[2] = i2;
    env->shadow[3] = i3;
    env->shadow[4] = i4;
    env->shadow[5] = i5;
    env->shadow[6] = i6;
    env->shadow[7] = i7;
}
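/* The registers exchanged above ($8-$14 and $25) are the ones the Alpha
   architecture shadows while in PALmode; do_interrupt() below swaps them
   in when entering PALmode, and the PALcode return path is presumably
   expected to swap them back out.  */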
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }
    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }
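    /* Illustrative example, assuming the usual KSEG at 0xfffffc0000000000
       within the 43-bit virtual address space: the virtual address
       0xfffffd0000001234 has bit 40 of its KSEG offset set, so the code
       above produces the physical address 0x0000080000001234, with offset
       bit 40 relocated to bit 43.  */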
    /* Interpret the page table exactly like PALcode does.  */
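    /* Sketch of the address breakdown, assuming the usual 8KB Alpha page
       size (TARGET_PAGE_BITS == 13): bits [42:33] index the L1 table,
       bits [32:23] the L2 table, bits [22:13] the L3 table, and bits
       [12:0] are the byte offset; each valid PTE keeps the PFN in its
       high 32 bits, hence the ">> 32 << TARGET_PAGE_BITS" below.  */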
    pt = env->ptbr;

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
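    /* The PTE fault-on bits (FOR/FOW/FOE in bits 1-3) line up with
       PAGE_READ/PAGE_WRITE/PAGE_EXEC after the shift by one, which is what
       the #error check above guards; a set fault-on bit thus clears the
       matching permission so the test below reports MM_K_FOR/FOW/FOE.  */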
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUAlphaState *env, target_ulong addr)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}
int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong addr, int rw,
                               int mmu_idx)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        env->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (rw == 2 ? -1 : rw);
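        /* These presumably become the OSF/1 entMM arguments: a0 = faulting
           VA, a1 = MMCSR failure code (MM_K_TNV/ACV/FOR/FOW/FOE), and
           a2 = cause, with -1 denoting an instruction fetch, 0 a load and
           1 a store.  */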
        return 1;
    }

    tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */
void do_interrupt (CPUAlphaState *env)
{
    int i = env->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        case EXCP_STL_C:
            name = "stl_c";
            break;
        case EXCP_STQ_C:
            name = "stq_c";
            break;
        }
        qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, env->pc, env->ir[IR_SP]);
    }
    env->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
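        /* For example, CALL_PAL 0x83 (the unprivileged OSF/1 callsys) lands
           at offset 0x2000 + (0x83 - 0x80) * 64 = 0x20c0 from env->palbr,
           while the privileged CALL_PAL 0x02 (draina) lands at
           0x1000 + 2 * 64 = 0x1080.  */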
        break;
    default:
        cpu_abort(env, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | env->pal_mode;

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    if (!env->pal_mode) {
        env->pal_mode = 1;
        swap_shadow_regs(env);
    }
#endif /* !USER_ONLY */
}
void cpu_dump_state (CPUAlphaState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    int i;

    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                env->pc, env->ps);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
                    linux_reg_names[i], env->ir[i]);
        if ((i % 3) == 2)
            cpu_fprintf(f, "\n");
    }

    cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                env->lock_addr, env->lock_value);

    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
                    *((uint64_t *)(&env->fir[i])));
        if ((i % 3) == 2)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n");
}
void do_restore_state(CPUAlphaState *env, void *retaddr)
{
    uintptr_t pc = (uintptr_t)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}
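/* RETADDR above is a host return address inside a translation block,
   typically obtained via GETPC() in the calling helper; tb_find_pc() maps
   it back to the TB and cpu_restore_state() then recovers the precise
   guest PC before the exception is raised.  */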
/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(env);
}
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, void *retaddr,
                                int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(env, retaddr);
    cpu_loop_exit(env);
}
void QEMU_NORETURN arith_excp(CPUAlphaState *env, void *retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
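    /* These presumably become the OSF/1 entArith arguments: a0 = exception
       summary and a1 = register write mask.  */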
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}