target-i386/cc_helper.c
/*
 * x86 condition code helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
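
/* parity_table[n] is CC_P when the byte value n has an even number of set
 * bits, which is how the x86 parity flag (PF) is defined over the low 8 bits
 * of a result. */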
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
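
/* Instantiate the size-dependent compute_all_xxx()/compute_c_xxx() helpers
 * from cc_helper_template.h once per operand width: SHIFT 0 produces the
 * 8-bit variants, 1 the 16-bit, 2 the 32-bit and 3 the 64-bit ones. */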
#define SHIFT 0
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "cc_helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "cc_helper_template.h"
#undef SHIFT

#endif
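
/* ADCX and ADOX update only CF resp. OF.  In these helpers src1 holds the
 * previous flag word, dst the newly computed carry bit and src2 the newly
 * computed overflow bit; the result splices the fresh bit(s) into src1. */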
static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_C) | (dst * CC_C);
}

static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_O) | (src2 * CC_O);
}

static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}
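
/* Rebuild all lazily evaluated condition-code bits.  op identifies the last
 * flag-setting operation and dst/src1/src2 are the values that were saved
 * for it (CC_DST/CC_SRC/CC_SRC2). */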
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                   target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return src1;
    case CC_OP_CLR:
        return CC_Z | CC_P;

    case CC_OP_MULB:
        return compute_all_mulb(dst, src1);
    case CC_OP_MULW:
        return compute_all_mulw(dst, src1);
    case CC_OP_MULL:
        return compute_all_mull(dst, src1);

    case CC_OP_ADDB:
        return compute_all_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_all_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_all_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_all_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_all_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_all_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_all_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_all_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_all_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_all_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_all_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_all_sbbl(dst, src1, src2);

    case CC_OP_LOGICB:
        return compute_all_logicb(dst, src1);
    case CC_OP_LOGICW:
        return compute_all_logicw(dst, src1);
    case CC_OP_LOGICL:
        return compute_all_logicl(dst, src1);

    case CC_OP_INCB:
        return compute_all_incb(dst, src1);
    case CC_OP_INCW:
        return compute_all_incw(dst, src1);
    case CC_OP_INCL:
        return compute_all_incl(dst, src1);

    case CC_OP_DECB:
        return compute_all_decb(dst, src1);
    case CC_OP_DECW:
        return compute_all_decw(dst, src1);
    case CC_OP_DECL:
        return compute_all_decl(dst, src1);

    case CC_OP_SHLB:
        return compute_all_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_all_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_all_shll(dst, src1);

    case CC_OP_SARB:
        return compute_all_sarb(dst, src1);
    case CC_OP_SARW:
        return compute_all_sarw(dst, src1);
    case CC_OP_SARL:
        return compute_all_sarl(dst, src1);

    case CC_OP_BMILGB:
        return compute_all_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_all_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_all_bmilgl(dst, src1);

    case CC_OP_ADCX:
        return compute_all_adcx(dst, src1, src2);
    case CC_OP_ADOX:
        return compute_all_adox(dst, src1, src2);
    case CC_OP_ADCOX:
        return compute_all_adcox(dst, src1, src2);

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq(dst, src1);
    case CC_OP_ADDQ:
        return compute_all_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_all_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_all_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_all_sbbq(dst, src1, src2);
    case CC_OP_LOGICQ:
        return compute_all_logicq(dst, src1);
    case CC_OP_INCQ:
        return compute_all_incq(dst, src1);
    case CC_OP_DECQ:
        return compute_all_decq(dst, src1);
    case CC_OP_SHLQ:
        return compute_all_shlq(dst, src1);
    case CC_OP_SARQ:
        return compute_all_sarq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_all_bmilgq(dst, src1);
#endif
    }
}
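
/* Convenience wrapper for C callers: pull dst/src1/src2 out of the CPU
 * state (CC_DST/CC_SRC/CC_SRC2 expand to fields of env). */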
uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
{
    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
}
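
/* Like helper_cc_compute_all(), but returns only the carry flag, so callers
 * that need CF alone do not pay for a full flag reconstruction. */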
target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
                                 target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_CLR:
        return 0;

    case CC_OP_EFLAGS:
    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
    case CC_OP_ADOX:
        return src1 & 1;

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
        return src1;

    case CC_OP_MULB:
    case CC_OP_MULW:
    case CC_OP_MULL:
    case CC_OP_MULQ:
        return src1 != 0;

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return dst;

    case CC_OP_ADDB:
        return compute_c_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_c_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_c_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);

    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);

    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);

#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
#endif
    }
}
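
/* EFLAGS access helpers: writes go through cpu_load_eflags() so DF and the
 * lazy condition-code state are updated together; reads rebuild the
 * arithmetic flags with cpu_cc_compute_all() and merge in DF and the other
 * stored bits. */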
void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}

target_ulong helper_read_eflags(CPUX86State *env)
{
    uint32_t eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    eflags |= (env->df & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_reset_rf(CPUX86State *env)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(CPUX86State *env)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(CPUX86State *env)
{
    env->eflags |= IF_MASK;
}

void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
}
#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(CPUX86State *env)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception_ra(env, EXCP0D_GPF, GETPC());
    }
}
#endif