/*
 *  Helpers for loads and stores
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Softmmu support */
#ifndef CONFIG_USER_ONLY
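
/*
 * The helpers below access guest memory by physical address, going
 * through the CPU's address space directly rather than through the
 * softmmu TLB.  They presumably back the PALmode physical-addressing
 * load/store forms emitted by the translator.
 */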

uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    return (int32_t)ldl_phys(cs->as, p);
}

uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    return ldq_phys(cs->as, p);
}
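
/*
 * Load-locked: record the address and the value read in
 * env->lock_addr / env->lock_value, so that a later store-conditional
 * can detect whether the location was modified in between.
 */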
uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(cs->as, p);
}

uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    env->lock_addr = p;
    return env->lock_value = ldq_phys(cs->as, p);
}

void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    stl_phys(cs->as, p, v);
}

void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    stq_phys(cs->as, p, v);
}

uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(cs->as, p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(cs->as, p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(cs->as, p);
        if (old == env->lock_value) {
            stq_phys(cs->as, p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
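
/*
 * As a sketch of how a guest exercises the store-conditional helpers
 * above: an atomic increment on Alpha is typically written as a
 * LDx_L/STx_C retry loop, e.g.
 *
 *     1:  ldq_l   t0, 0(a0)       # load-locked, records addr/value
 *         addq    t0, 1, t0
 *         stq_c   t0, 0(a0)       # t0 := 1 on success, 0 on failure
 *         beq     t0, 1b          # retry if the reservation was lost
 *
 * The store succeeds only if the memory word still compares equal to
 * the value recorded by the load-locked helper.  Note that comparing
 * by value can spuriously succeed if the location was written and
 * then restored to its old contents (the classic ABA case), unlike a
 * real hardware lock flag.
 */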

void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   int is_write, int is_user, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    uint64_t pc;
    uint32_t insn;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }

    pc = env->pc;
    insn = cpu_ldl_code(env, pc);

    env->trap_arg0 = addr;
    env->trap_arg1 = insn >> 26;                /* opcode */
    env->trap_arg2 = (insn >> 21) & 31;         /* dest regno */
    cs->exception_index = EXCP_UNALIGN;
    env->error_code = 0;
    cpu_loop_exit(cs);
}
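
/*
 * The values stashed above appear to match what the OSF/1 PALcode's
 * unaligned-fault entry expects in its argument registers (a0 =
 * faulting VA, a1 = opcode, a2 = destination register); this mapping
 * is inferred from the trap_arg usage here, not stated in this file.
 */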

void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                 bool is_write, bool is_exec, int unused,
                                 unsigned size)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;

    env->trap_arg0 = addr;
    env->trap_arg1 = is_write ? 1 : 0;
    dynamic_excp(env, 0, EXCP_MCHK, 0);
}

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = alpha_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (retaddr) {
            cpu_restore_state(cs, retaddr);
        }
        /* Exception index and error code are already set */
        cpu_loop_exit(cs);
    }
}
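
/*
 * Note that cpu_loop_exit() does not return: it longjmps back to the
 * main execution loop, which then delivers the exception recorded in
 * cs->exception_index.  The EXCP_UNALIGN and EXCP_MCHK paths above
 * exit the same way.
 */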
#endif /* CONFIG_USER_ONLY */