target/i386/tcg/cc_helper.c
/*
 * x86 condition code helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"
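/*
 * PF lookup table: parity_table[b] is CC_P when byte b contains an even
 * number of set bits (x86 parity flag semantics), 0 otherwise.
 */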
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
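/*
 * Instantiate the size-specific flag helpers (compute_all_* and
 * compute_c_*) from cc_helper_template.h; SHIFT 0/1/2/3 select the
 * 8-, 16-, 32- and 64-bit operand widths.
 */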
#define SHIFT 0
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "cc_helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "cc_helper_template.h"
#undef SHIFT

#endif
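/*
 * ADCX/ADOX only update CF/OF: dst holds the newly computed CF (0 or 1),
 * src2 the newly computed OF, and src1 carries the previous flags, whose
 * other bits are preserved unchanged.
 */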
static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_C) | (dst * CC_C);
}

static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_O) | (src2 * CC_O);
}

static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}
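/*
 * Recompute all condition-code bits of EFLAGS from the lazily stored
 * dst/src1/src2 values, dispatching on the pending CC_OP.
 */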
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                   target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return src1;
    case CC_OP_CLR:
        return CC_Z | CC_P;
    case CC_OP_POPCNT:
        return src1 ? 0 : CC_Z;

    case CC_OP_MULB:
        return compute_all_mulb(dst, src1);
    case CC_OP_MULW:
        return compute_all_mulw(dst, src1);
    case CC_OP_MULL:
        return compute_all_mull(dst, src1);

    case CC_OP_ADDB:
        return compute_all_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_all_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_all_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_all_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_all_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_all_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_all_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_all_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_all_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_all_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_all_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_all_sbbl(dst, src1, src2);

    case CC_OP_LOGICB:
        return compute_all_logicb(dst, src1);
    case CC_OP_LOGICW:
        return compute_all_logicw(dst, src1);
    case CC_OP_LOGICL:
        return compute_all_logicl(dst, src1);

    case CC_OP_INCB:
        return compute_all_incb(dst, src1);
    case CC_OP_INCW:
        return compute_all_incw(dst, src1);
    case CC_OP_INCL:
        return compute_all_incl(dst, src1);

    case CC_OP_DECB:
        return compute_all_decb(dst, src1);
    case CC_OP_DECW:
        return compute_all_decw(dst, src1);
    case CC_OP_DECL:
        return compute_all_decl(dst, src1);

    case CC_OP_SHLB:
        return compute_all_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_all_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_all_shll(dst, src1);

    case CC_OP_SARB:
        return compute_all_sarb(dst, src1);
    case CC_OP_SARW:
        return compute_all_sarw(dst, src1);
    case CC_OP_SARL:
        return compute_all_sarl(dst, src1);

    case CC_OP_BMILGB:
        return compute_all_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_all_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_all_bmilgl(dst, src1);

    case CC_OP_ADCX:
        return compute_all_adcx(dst, src1, src2);
    case CC_OP_ADOX:
        return compute_all_adox(dst, src1, src2);
    case CC_OP_ADCOX:
        return compute_all_adcox(dst, src1, src2);

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq(dst, src1);
    case CC_OP_ADDQ:
        return compute_all_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_all_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_all_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_all_sbbq(dst, src1, src2);
    case CC_OP_LOGICQ:
        return compute_all_logicq(dst, src1);
    case CC_OP_INCQ:
        return compute_all_incq(dst, src1);
    case CC_OP_DECQ:
        return compute_all_decq(dst, src1);
    case CC_OP_SHLQ:
        return compute_all_shlq(dst, src1);
    case CC_OP_SARQ:
        return compute_all_sarq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_all_bmilgq(dst, src1);
#endif
    }
}
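/*
 * Convenience wrapper: compute all flags from the CC_* values currently
 * cached in the CPU state.
 */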
uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
{
    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
}
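/*
 * Compute only the carry flag; many CC_OP cases can answer this much more
 * cheaply than a full flags computation.
 */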
target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
                                 target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return 0;

    case CC_OP_EFLAGS:
    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
    case CC_OP_ADOX:
        return src1 & 1;

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
        return src1;

    case CC_OP_MULB:
    case CC_OP_MULW:
    case CC_OP_MULL:
    case CC_OP_MULQ:
        return src1 != 0;

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return dst;

    case CC_OP_ADDB:
        return compute_c_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_c_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_c_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);

    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);

    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);

#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
#endif
    }
}
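/* Load a new EFLAGS value, modifying only the bits selected by update_mask. */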
void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}
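/*
 * Materialize the architectural EFLAGS value: recompute the condition-code
 * bits, fold in DF, and hide VM and RF from the guest-visible result.
 */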
target_ulong helper_read_eflags(CPUX86State *env)
{
    uint32_t eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    eflags |= (env->df & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
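/*
 * Simple flag helpers: CLTS clears CR0.TS and the cached HF_TS hflag; the
 * others toggle single EFLAGS bits (RF, IF, AC).
 */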
void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_reset_rf(CPUX86State *env)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(CPUX86State *env)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(CPUX86State *env)
{
    env->eflags |= IF_MASK;
}

void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
}
#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(CPUX86State *env)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
}
#endif