target-i386/cc_helper.c
/*
 *  x86 condition code helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"
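
/* PF lookup table: CC_P is set for byte values with an even number of one
 * bits, since the x86 parity flag only reflects the low 8 bits of a result. */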
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
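
/* Instantiate the flag computation helpers (compute_all_* / compute_c_*)
 * for 8-, 16-, 32- and, on 64-bit targets, 64-bit operand sizes.  SHIFT
 * selects the operand width inside cc_helper_template.h. */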
#define SHIFT 0
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "cc_helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "cc_helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "cc_helper_template.h"
#undef SHIFT

#endif
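
/* For ADCX/ADOX, src1 holds the previous EFLAGS while dst (resp. src2)
 * holds the new carry (resp. overflow) bit as 0 or 1; only CF/OF are
 * updated, the remaining flags are passed through unchanged. */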
static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_C) | (dst * CC_C);
}

static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    return (src1 & ~CC_O) | (src2 * CC_O);
}

static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
}
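
/* Lazy flag evaluation: the translator only records the last flag-setting
 * operation (CC_OP) together with its operands (CC_DST, CC_SRC, CC_SRC2);
 * this helper reconstructs the full set of arithmetic EFLAGS bits from
 * that saved state. */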
target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
                                   target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return src1;
    case CC_OP_CLR:
        return CC_Z | CC_P;

    case CC_OP_MULB:
        return compute_all_mulb(dst, src1);
    case CC_OP_MULW:
        return compute_all_mulw(dst, src1);
    case CC_OP_MULL:
        return compute_all_mull(dst, src1);

    case CC_OP_ADDB:
        return compute_all_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_all_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_all_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_all_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_all_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_all_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_all_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_all_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_all_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_all_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_all_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_all_sbbl(dst, src1, src2);

    case CC_OP_LOGICB:
        return compute_all_logicb(dst, src1);
    case CC_OP_LOGICW:
        return compute_all_logicw(dst, src1);
    case CC_OP_LOGICL:
        return compute_all_logicl(dst, src1);

    case CC_OP_INCB:
        return compute_all_incb(dst, src1);
    case CC_OP_INCW:
        return compute_all_incw(dst, src1);
    case CC_OP_INCL:
        return compute_all_incl(dst, src1);

    case CC_OP_DECB:
        return compute_all_decb(dst, src1);
    case CC_OP_DECW:
        return compute_all_decw(dst, src1);
    case CC_OP_DECL:
        return compute_all_decl(dst, src1);

    case CC_OP_SHLB:
        return compute_all_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_all_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_all_shll(dst, src1);

    case CC_OP_SARB:
        return compute_all_sarb(dst, src1);
    case CC_OP_SARW:
        return compute_all_sarw(dst, src1);
    case CC_OP_SARL:
        return compute_all_sarl(dst, src1);

    case CC_OP_BMILGB:
        return compute_all_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_all_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_all_bmilgl(dst, src1);

    case CC_OP_ADCX:
        return compute_all_adcx(dst, src1, src2);
    case CC_OP_ADOX:
        return compute_all_adox(dst, src1, src2);
    case CC_OP_ADCOX:
        return compute_all_adcox(dst, src1, src2);

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq(dst, src1);
    case CC_OP_ADDQ:
        return compute_all_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_all_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_all_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_all_sbbq(dst, src1, src2);
    case CC_OP_LOGICQ:
        return compute_all_logicq(dst, src1);
    case CC_OP_INCQ:
        return compute_all_incq(dst, src1);
    case CC_OP_DECQ:
        return compute_all_decq(dst, src1);
    case CC_OP_SHLQ:
        return compute_all_shlq(dst, src1);
    case CC_OP_SARQ:
        return compute_all_sarq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_all_bmilgq(dst, src1);
#endif
    }
}
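
/* Convenience wrapper used outside generated code: reads the lazy flag
 * state directly from the CPU environment. */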
uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
{
    return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
}
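
/* Compute only the carry flag; this is cheaper than helper_cc_compute_all
 * for the common case where CF alone is consumed (e.g. ADC, SBB, JC). */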
target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
                                 target_ulong src2, int op)
{
    switch (op) {
    default: /* should never happen */
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_CLR:
        return 0;

    case CC_OP_EFLAGS:
    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
    case CC_OP_ADOX:
        return src1 & 1;

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
        return src1;

    case CC_OP_MULB:
    case CC_OP_MULW:
    case CC_OP_MULL:
    case CC_OP_MULQ:
        return src1 != 0;

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return dst;

    case CC_OP_ADDB:
        return compute_c_addb(dst, src1);
    case CC_OP_ADDW:
        return compute_c_addw(dst, src1);
    case CC_OP_ADDL:
        return compute_c_addl(dst, src1);

    case CC_OP_ADCB:
        return compute_c_adcb(dst, src1, src2);
    case CC_OP_ADCW:
        return compute_c_adcw(dst, src1, src2);
    case CC_OP_ADCL:
        return compute_c_adcl(dst, src1, src2);

    case CC_OP_SUBB:
        return compute_c_subb(dst, src1);
    case CC_OP_SUBW:
        return compute_c_subw(dst, src1);
    case CC_OP_SUBL:
        return compute_c_subl(dst, src1);

    case CC_OP_SBBB:
        return compute_c_sbbb(dst, src1, src2);
    case CC_OP_SBBW:
        return compute_c_sbbw(dst, src1, src2);
    case CC_OP_SBBL:
        return compute_c_sbbl(dst, src1, src2);

    case CC_OP_SHLB:
        return compute_c_shlb(dst, src1);
    case CC_OP_SHLW:
        return compute_c_shlw(dst, src1);
    case CC_OP_SHLL:
        return compute_c_shll(dst, src1);

    case CC_OP_BMILGB:
        return compute_c_bmilgb(dst, src1);
    case CC_OP_BMILGW:
        return compute_c_bmilgw(dst, src1);
    case CC_OP_BMILGL:
        return compute_c_bmilgl(dst, src1);

#ifdef TARGET_X86_64
    case CC_OP_ADDQ:
        return compute_c_addq(dst, src1);
    case CC_OP_ADCQ:
        return compute_c_adcq(dst, src1, src2);
    case CC_OP_SUBQ:
        return compute_c_subq(dst, src1);
    case CC_OP_SBBQ:
        return compute_c_sbbq(dst, src1, src2);
    case CC_OP_SHLQ:
        return compute_c_shlq(dst, src1);
    case CC_OP_BMILGQ:
        return compute_c_bmilgq(dst, src1);
#endif
    }
}
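
/* EFLAGS access helpers.  Writing goes through cpu_load_eflags() so the
 * lazily tracked flag state and the direction flag stay in sync; reading
 * folds the lazily computed arithmetic flags and DF back into the
 * architectural EFLAGS value. */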
void helper_write_eflags(CPUX86State *env, target_ulong t0,
                         uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}
target_ulong helper_read_eflags(CPUX86State *env)
{
    uint32_t eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    eflags |= (env->df & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
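
/* Helpers for instructions that directly toggle individual CR0/EFLAGS bits
 * (CLTS, CLI/STI, CLAC/STAC) and for clearing RF. */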
void helper_clts(CPUX86State *env)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_reset_rf(CPUX86State *env)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(CPUX86State *env)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(CPUX86State *env)
{
    env->eflags |= IF_MASK;
}

void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
}
#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(CPUX86State *env)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(env, EXCP0D_GPF);
    }
}
#endif
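
/* The interrupt-shadow helpers set/clear HF_INHIBIT_IRQ_MASK, which delays
 * interrupt delivery for one instruction (e.g. after STI or MOV SS). */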
void helper_set_inhibit_irq(CPUX86State *env)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(CPUX86State *env)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}