[qemu/ar7.git] target/i386/mem_helper.c
/*
 * x86 memory access helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "tcg.h"
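
/*
 * CMPXCHG8B without LOCK semantics: compare the 64-bit value at [a0]
 * with EDX:EAX; if equal, store ECX:EBX and set ZF, otherwise load the
 * old value into EDX:EAX and clear ZF.  As on hardware, the memory
 * operand is always written back.
 */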
void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

    oldv = cpu_ldq_data_ra(env, a0, ra);
    newv = (cmpv == oldv ? newv : oldv);
    /* always do the store */
    cpu_stq_data_ra(env, a0, newv, ra);

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
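
/*
 * Locked CMPXCHG8B: same operation as above, but the compare-and-swap
 * must be performed atomically.  Without 64-bit host atomics the CPU
 * loop is exited so the instruction can be restarted in the exclusive
 * (single-threaded) execution mode.
 */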
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
#ifdef CONFIG_ATOMIC64
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

#ifdef CONFIG_USER_ONLY
    {
        uint64_t *haddr = g2h(a0);
        cmpv = cpu_to_le64(cmpv);
        newv = cpu_to_le64(newv);
        oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
        oldv = le64_to_cpu(oldv);
    }
#else
    {
        uintptr_t ra = GETPC();
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
        oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
    }
#endif

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
#else
    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
#endif /* CONFIG_ATOMIC64 */
}

#ifdef TARGET_X86_64
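/*
 * CMPXCHG16B without LOCK semantics: the 16-byte operand must be
 * 16-byte aligned, otherwise #GP is raised.  Compare RDX:RAX with the
 * 128-bit value at [a0]; if equal, store RCX:RBX and set ZF, otherwise
 * load the old value into RDX:RAX and clear ZF.
 */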
void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();
    Int128 oldv, cmpv, newv;
    uint64_t o0, o1;
    int eflags;
    bool success;

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
    newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

    o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
    o1 = cpu_ldq_data_ra(env, a0 + 8, ra);

    oldv = int128_make128(o0, o1);
    success = int128_eq(oldv, cmpv);
    if (!success) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
    cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);

    if (success) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = int128_getlo(oldv);
        env->regs[R_EDX] = int128_gethi(oldv);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
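
/*
 * Locked CMPXCHG16B: as above, but the compare-and-swap must be atomic.
 * Without 128-bit host atomics the CPU loop is exited so the
 * instruction can be restarted in the exclusive execution mode.
 */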
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    } else {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int eflags = cpu_cc_compute_all(env, CC_OP);

        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
                                                    newv, oi, ra);

        if (int128_eq(oldv, cmpv)) {
            eflags |= CC_Z;
        } else {
            env->regs[R_EAX] = int128_getlo(oldv);
            env->regs[R_EDX] = int128_gethi(oldv);
            eflags &= ~CC_Z;
        }
        CC_SRC = eflags;
#endif
    }
}
#endif
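
/*
 * BOUND (16-bit form): read the signed bounds pair at [a0] and raise
 * #BR if the index v lies outside [low, high].  If MPX is enabled, the
 * bound status register is cleared before the exception is raised.
 */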
void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data_ra(env, a0, GETPC());
    high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
    v = (int16_t)v;
    if (v < low || v > high) {
        if (env->hflags & HF_MPX_EN_MASK) {
            env->bndcs_regs.sts = 0;
        }
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}
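
/* BOUND (32-bit form): same check with 32-bit bounds and index. */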
void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data_ra(env, a0, GETPC());
    high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
    if (v < low || v > high) {
        if (env->hflags & HF_MPX_EN_MASK) {
            env->bndcs_regs.sts = 0;
        }
        raise_exception_ra(env, EXCP05_BOUND, GETPC());
    }
}

#if !defined(CONFIG_USER_ONLY)
/* Try to fill the TLB and raise an exception on error.  If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c).
 */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (ret) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
    }
}
#endif