target/i386/cc_helper.c
/*
 *  x86 condition code helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
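/*
 * Precomputed x86 parity flag: parity_table[x] is CC_P when byte x has an
 * even number of set bits (PF only ever reflects the low 8 bits of a result),
 * and 0 otherwise.
 */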
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
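/*
 * cc_helper_template.h expands the size-specific compute_all_* and
 * compute_c_* helpers; SHIFT selects the operand width (0 = byte, 1 = word,
 * 2 = long, and 3 = quad on 64-bit targets).
 */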
#define SHIFT 0
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "cc_helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "cc_helper_template.h"
#undef SHIFT

#endif
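/*
 * For ADCX/ADOX the freshly produced CF is kept in CC_DST (dst) and the
 * freshly produced OF in CC_SRC2 (src2), while CC_SRC (src1) holds the
 * remaining flags; these helpers merge the single bits back into a full
 * flags word.
 */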
static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_C) | (dst * CC_C);
}

static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_O) | (src2 * CC_O);
}

static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}
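/*
 * Rebuild all arithmetic flags (CF, PF, AF, ZF, SF, OF) from the lazily
 * saved CC_DST/CC_SRC/CC_SRC2 values; 'op' identifies the last flag-setting
 * operation and its operand width.
 */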
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                   target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return src1;
    case CC_OP_CLR:
        return CC_Z | CC_P;
    case CC_OP_POPCNT:
        return src1 ? 0 : CC_Z;

    case CC_OP_MULB:
        return compute_all_mulb(dst, src1);
    case CC_OP_MULW:
        return compute_all_mulw(dst, src1);
    case CC_OP_MULL:
        return compute_all_mull(dst, src1);

    case CC_OP_ADDB:
        return compute_all_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_all_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_all_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_all_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_all_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_all_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_all_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_all_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_all_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_all_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_all_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_all_sbbl(dst, src1, src2);

    case CC_OP_LOGICB:
        return compute_all_logicb(dst, src1);
    case CC_OP_LOGICW:
        return compute_all_logicw(dst, src1);
    case CC_OP_LOGICL:
        return compute_all_logicl(dst, src1);

    case CC_OP_INCB:
        return compute_all_incb(dst, src1);
    case CC_OP_INCW:
        return compute_all_incw(dst, src1);
    case CC_OP_INCL:
        return compute_all_incl(dst, src1);

    case CC_OP_DECB:
        return compute_all_decb(dst, src1);
    case CC_OP_DECW:
        return compute_all_decw(dst, src1);
    case CC_OP_DECL:
        return compute_all_decl(dst, src1);

    case CC_OP_SHLB:
        return compute_all_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_all_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_all_shll(dst, src1);

    case CC_OP_SARB:
        return compute_all_sarb(dst, src1);
    case CC_OP_SARW:
        return compute_all_sarw(dst, src1);
    case CC_OP_SARL:
        return compute_all_sarl(dst, src1);

    case CC_OP_BMILGB:
        return compute_all_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_all_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_all_bmilgl(dst, src1);

    case CC_OP_ADCX:
        return compute_all_adcx(dst, src1, src2);
    case CC_OP_ADOX:
        return compute_all_adox(dst, src1, src2);
    case CC_OP_ADCOX:
        return compute_all_adcox(dst, src1, src2);

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq(dst, src1);
    case CC_OP_ADDQ:
        return compute_all_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_all_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_all_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_all_sbbq(dst, src1, src2);
    case CC_OP_LOGICQ:
        return compute_all_logicq(dst, src1);
    case CC_OP_INCQ:
        return compute_all_incq(dst, src1);
    case CC_OP_DECQ:
        return compute_all_decq(dst, src1);
    case CC_OP_SHLQ:
        return compute_all_shlq(dst, src1);
    case CC_OP_SARQ:
        return compute_all_sarq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_all_bmilgq(dst, src1);
#endif
    }
}
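/* Convenience wrapper that reads the lazy flag state out of 'env'. */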
uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
{
    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
}
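/*
 * Compute only CF; cheaper than helper_cc_compute_all() when an instruction
 * (e.g. ADC or SBB) only needs the carry as input.
 */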
target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
                                 target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return 0;

    case CC_OP_EFLAGS:
    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
    case CC_OP_ADOX:
        return src1 & 1;

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
        return src1;

    case CC_OP_MULB:
    case CC_OP_MULW:
    case CC_OP_MULL:
    case CC_OP_MULQ:
        return src1 != 0;

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return dst;

    case CC_OP_ADDB:
        return compute_c_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_c_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_c_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);

    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);

    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);

#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
#endif
    }
}
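/*
 * The helpers below back guest accesses to EFLAGS and related control bits
 * (e.g. PUSHF/POPF, CLTS, CLI/STI, CLAC/STAC).
 */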
void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}

target_ulong helper_read_eflags(CPUX86State *env)
{
    uint32_t eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    eflags |= (env->df & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_reset_rf(CPUX86State *env)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(CPUX86State *env)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(CPUX86State *env)
{
    env->eflags |= IF_MASK;
}

void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(CPUX86State *env)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
}
#endif