/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "exec.h"

//#define DEBUG_PCALL
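/* The x86 PF flag is set when the low byte of a result contains an even
   number of one bits; this table gives CC_P or 0 for each byte value. */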
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
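/* RCL rotates through the carry flag, so a 16 bit rotate cycles over 17
   bit positions and an 8 bit one over 9; the tables below reduce the
   rotate count accordingly. */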
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
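/* A descriptor is read as two 32 bit words: e1 holds limit bits 15..0 and
   base bits 15..0; e2 holds base bits 23..16, the type/flag bits plus
   limit bits 19..16, then base bits 31..24. The helpers below reassemble
   those pieces. */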
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
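/* In a 32 bit TSS the (esp, ss) pairs for CPL 0-2 start at offset 4 with
   a stride of 8 bytes; in a 16 bit TSS they start at offset 2 with a
   stride of 4, which is what (dpl * 4 + 2) << shift computes below. */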
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
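/* switch_tss() is reached from three places and the busy bit handling
   differs: JMP and IRET clear the busy bit of the old TSS, JMP and CALL
   set it on the new one, and CALL additionally links back to the old task
   by storing its selector in the new TSS and setting NT. */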
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, env->eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        env->cr[3] = new_cr3;
        cpu_x86_update_cr3(env);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    dt = &env->gdt;
    index = new_ldt & ~7;
    if ((index + 7) > dt->limit)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
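/* The I/O permission bitmap starts at the 16 bit offset stored at 0x66 in
   the TSS; each port owns one bit and an access of 'size' bytes needs that
   many consecutive clear bits. Reading 16 bits at (addr >> 3) and shifting
   by (addr & 7) covers any such span, hence the two-byte limit check. */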
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
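/* These macros update 'sp' in place and mask it with 'sp_mask' on every
   access, so 16 bit stacks wrap at 64K while 32 bit stacks use the full
   esp; 'ssp' is the linear base of the stack segment. */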
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* XXX: explain me why W2K hangs if the whole segment cache is
               reset ? */
            env->segs[R_ES].selector = 0;
            env->segs[R_ES].flags = 0;
            env->segs[R_DS].selector = 0;
            env->segs[R_DS].flags = 0;
            env->segs[R_FS].selector = 0;
            env->segs[R_FS].flags = 0;
            env->segs[R_GS].selector = 0;
            env->segs[R_GS].flags = 0;
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
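/* Real mode vectors live in a table of 4 byte entries, each holding the
   handler offset in its low word and the CS selector in its high word. */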
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#if 0
    {
        extern FILE *stdout;
        static int count;
        if (env->cr[0] & CR0_PE_MASK) {
            fprintf(stdout, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                    count, intno, error_code, is_int);
            count++;
        }
    }
    if ((env->cr[0] & CR0_PE_MASK) && intno == 0x10) {
        tb_flush(env);
        cpu_set_log(CPU_LOG_ALL);
    }
#endif
#ifdef DEBUG_PCALL
    if (loglevel) {
        static int count;
        fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                count, intno, error_code, is_int);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
#if 1
        {
            int i;
            uint8_t *ptr;
            fprintf(logfile, " code=");
            ptr = env->segs[R_CS].base + env->eip;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
#endif
        count++;
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
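/* XXX: unlike real hardware, the division helpers above raise no #DE when
   the quotient does not fit in 32 bits; the result is silently truncated */

/* cmpxchg8b compares EDX:EAX with the 64 bit operand at A0: if they are
   equal it stores ECX:EBX there and sets ZF, otherwise it loads the
   operand into EDX:EAX and clears ZF */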
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
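/* For EAX=0, EBX/EDX/ECX hold the vendor string: 0x756e6547, 0x49656e69
   and 0x6c65746e decode to "Genu", "ineI" and "ntel", i.e. "GenuineIntel"
   in the EBX, EDX, ECX order. */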
void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
        break;
    case 1:
        {
            int family, model, stepping;
            /* EAX = 1 info */
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lcall %04x:%08x\n",
                new_cs, new_eip);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel)
                fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
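/* On a return to an outer privilege level, data segments that are not
   accessible at the new CPL (DPL < CPL, except conforming code) must be
   invalidated; validate_seg() below nulls the segment cache in that case. */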
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
                new_cs, new_eip, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
                               ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        /* XXX: change sp_mask according to old segment ? */

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}
void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env);
        break;
    case 3:
        cpu_x86_update_cr3(env);
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
/* rdtsc */
#ifndef __i386__
uint64_t emu_time;
#endif

void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}
/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
/* BCD ops */

#define MUL10(iv) ( iv + iv + (iv << 3) )
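/* MUL10 computes 10*iv as iv + iv + 8*iv without a multiply. Packed BCD
   operands are 10 bytes: bytes 0-8 hold 18 decimal digits, two per byte
   and least significant first, and bit 7 of byte 9 is the sign. */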
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);   /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6;  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8;  /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop - floor(fpsrcop) < ceil(fpsrcop) - fpsrcop) ?
            floor(fpsrcop) : ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

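/* With C2 clear the reduction is complete and C0/C1/C3 carry bits
   2/1/0 of the quotient, which trig argument-reduction code uses to
   recover the octant of the original value.  With C2 set only a
   partial reduction by a large power-of-two multiple of ST(1) was
   performed and the instruction must be executed again. */
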
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6;  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8;  /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

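/* helper_fprem differs from helper_fprem1 only in how the oversized
   partial quotient is made integral: FPREM chops toward zero, while
   FPREM1 rounds to nearest, which is what makes FPREM1 the
   IEEE-754-style remainder. */
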
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

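/* On ARM this presumably targets the old FPA instruction set, whose
   rnd variants encode the rounding direction in the mnemonic (m =
   toward minus infinity, p = toward plus infinity, z = toward zero),
   so the guest's RC field can be honoured explicitly.  The generic
   path relies on rint(), which rounds according to the host's current
   rounding mode and so assumes it has been set up to match fpuc. */
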
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop, ST1);
    ST0 *= fptemp;
}

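/* FSCALE multiplies ST(0) by 2 raised to ST(1) truncated toward zero;
   using pow(2.0, ST1) without truncating first only matches the
   instruction exactly when ST(1) already holds an integral value. */
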
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500; /* Infinity */
        else
            env->fpus |= 0x100; /* NaN */
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000; /* Zero */
        else
            env->fpus |= 0x4400; /* Denormal */
    } else {
        env->fpus |= 0x400; /* Normal finite */
    }
}

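/* FXAM class encoding written into C3..C0 above (C1 = sign):
   0x100 = NaN (C0), 0x400 = normal finite (C2), 0x500 = infinity
   (C2|C0), 0x4000 = zero (C3), 0x4400 = denormal (C3|C2).  The
   empty-register case, which would need a look at the tag word, is
   not distinguished here. */
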
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

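/* The tag word built above uses two bits per register: 00 = valid,
   01 = zero, 10 = special (NaN, infinity, denormal, or a value
   missing the explicit integer bit), 11 = empty.  The environment is
   28 bytes in 32-bit mode and 14 bytes in 16-bit mode; the
   instruction- and data-pointer slots are simply written as zero. */
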
void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

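/* FSAVE image: the 14- or 28-byte environment (hence the 14 << data32
   skip) followed by ST(0)..ST(7) as 80-bit extended values, 10 bytes
   apiece.  As on real hardware the save ends with an implicit FNINIT:
   control word 0x37f (all exceptions masked), status 0, all tags
   empty. */
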
void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

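/* Each inclusion of softmmu_template.h instantiates the slow-path
   load/store helpers for one access size: SHIFT 0/1/2/3 produce the
   1-, 2-, 4- and 8-byte accessors carrying the _mmu suffix.  GETPC()
   captures the helper's return address so a faulting access can be
   mapped back to the translated block that issued it (see tlb_fill
   below). */
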
/* try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    if (is_write && page_unprotect(addr)) {
        /* nothing more to do: the page was write protected because
           there was code in it. page_unprotect() flushed the code. */
    }
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}