migration/postcopy: simplify calculation of run_start and fixup_start_addr
[qemu/ar7.git] / target / i386 / mem_helper.c
blobd50d4b0c40c0ff5ae5cf5865a90117f3594a420f
/*
 *  x86 memory access helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "qemu/int128.h"
26 #include "qemu/atomic128.h"
27 #include "tcg.h"
29 void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
31 uintptr_t ra = GETPC();
32 uint64_t oldv, cmpv, newv;
33 int eflags;
35 eflags = cpu_cc_compute_all(env, CC_OP);
37 cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
38 newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
40 oldv = cpu_ldq_data_ra(env, a0, ra);
41 newv = (cmpv == oldv ? newv : oldv);
42 /* always do the store */
43 cpu_stq_data_ra(env, a0, newv, ra);
45 if (oldv == cmpv) {
46 eflags |= CC_Z;
47 } else {
48 env->regs[R_EAX] = (uint32_t)oldv;
49 env->regs[R_EDX] = (uint32_t)(oldv >> 32);
50 eflags &= ~CC_Z;
52 CC_SRC = eflags;
55 void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
57 #ifdef CONFIG_ATOMIC64
58 uint64_t oldv, cmpv, newv;
59 int eflags;
61 eflags = cpu_cc_compute_all(env, CC_OP);
63 cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
64 newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
66 #ifdef CONFIG_USER_ONLY
68 uint64_t *haddr = g2h(a0);
69 cmpv = cpu_to_le64(cmpv);
70 newv = cpu_to_le64(newv);
71 oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
72 oldv = le64_to_cpu(oldv);
74 #else
76 uintptr_t ra = GETPC();
77 int mem_idx = cpu_mmu_index(env, false);
78 TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
79 oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
81 #endif
83 if (oldv == cmpv) {
84 eflags |= CC_Z;
85 } else {
86 env->regs[R_EAX] = (uint32_t)oldv;
87 env->regs[R_EDX] = (uint32_t)(oldv >> 32);
88 eflags &= ~CC_Z;
90 CC_SRC = eflags;
91 #else
92 cpu_loop_exit_atomic(env_cpu(env), GETPC());
93 #endif /* CONFIG_ATOMIC64 */
96 #ifdef TARGET_X86_64
97 void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
99 uintptr_t ra = GETPC();
100 Int128 oldv, cmpv, newv;
101 uint64_t o0, o1;
102 int eflags;
103 bool success;
105 if ((a0 & 0xf) != 0) {
106 raise_exception_ra(env, EXCP0D_GPF, GETPC());
108 eflags = cpu_cc_compute_all(env, CC_OP);
110 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
111 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
113 o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
114 o1 = cpu_ldq_data_ra(env, a0 + 8, ra);
116 oldv = int128_make128(o0, o1);
117 success = int128_eq(oldv, cmpv);
118 if (!success) {
119 newv = oldv;
122 cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
123 cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);
125 if (success) {
126 eflags |= CC_Z;
127 } else {
128 env->regs[R_EAX] = int128_getlo(oldv);
129 env->regs[R_EDX] = int128_gethi(oldv);
130 eflags &= ~CC_Z;
132 CC_SRC = eflags;
135 void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
137 uintptr_t ra = GETPC();
139 if ((a0 & 0xf) != 0) {
140 raise_exception_ra(env, EXCP0D_GPF, ra);
141 } else if (HAVE_CMPXCHG128) {
142 int eflags = cpu_cc_compute_all(env, CC_OP);
144 Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
145 Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
147 int mem_idx = cpu_mmu_index(env, false);
148 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
149 Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
150 newv, oi, ra);
152 if (int128_eq(oldv, cmpv)) {
153 eflags |= CC_Z;
154 } else {
155 env->regs[R_EAX] = int128_getlo(oldv);
156 env->regs[R_EDX] = int128_gethi(oldv);
157 eflags &= ~CC_Z;
159 CC_SRC = eflags;
160 } else {
161 cpu_loop_exit_atomic(env_cpu(env), ra);
164 #endif
166 void helper_boundw(CPUX86State *env, target_ulong a0, int v)
168 int low, high;
170 low = cpu_ldsw_data_ra(env, a0, GETPC());
171 high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
172 v = (int16_t)v;
173 if (v < low || v > high) {
174 if (env->hflags & HF_MPX_EN_MASK) {
175 env->bndcs_regs.sts = 0;
177 raise_exception_ra(env, EXCP05_BOUND, GETPC());
181 void helper_boundl(CPUX86State *env, target_ulong a0, int v)
183 int low, high;
185 low = cpu_ldl_data_ra(env, a0, GETPC());
186 high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
187 if (v < low || v > high) {
188 if (env->hflags & HF_MPX_EN_MASK) {
189 env->bndcs_regs.sts = 0;
191 raise_exception_ra(env, EXCP05_BOUND, GETPC());