/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
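/* Debug wrapper for raise_exception_err: it logs the host source line
   of the call site before delegating to the real function; the
   parenthesized call suppresses recursive macro expansion. */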
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
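/* parity_table[b] is CC_P when byte b contains an even number of set
   bits and 0 otherwise, mirroring the x86 PF flag.  A sketch of how
   such a table can be generated (illustrative only, not part of the
   build):

       for (i = 0; i < 256; i++) {
           int bits = 0, v = i;
           while (v) { bits += v & 1; v >>= 1; }
           table[i] = (bits & 1) ? 0 : CC_P;
       }
*/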
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
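/* rotate-through-carry on a 16-bit operand cycles through 17 bit
   positions (16 data bits plus CF), so the count is reduced modulo 17 */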
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
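/* likewise for 8-bit RCL/RCR: 8 data bits plus CF give a period of 9 */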
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
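/* constants reachable through the x87 load-constant instructions, in
   order: FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T */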
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /* pi */
    0.30102999566398119523L,  /* lg2 */
    0.69314718055994530943L,  /* ln2 */
    1.44269504088896340739L,  /* l2e */
    3.32192809488736234781L,  /* l2t */
};
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
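/* Worked example with illustrative values: the descriptor word pair
   e1 = 0x1234ffff, e2 = 0x00cf9a56 unpacks as
     base  = 0x1234 | (0x56 << 16) | 0x00000000 = 0x00561234
     limit = 0xffff | 0x000f0000 = 0xfffff; DESC_G_MASK is set, so the
             effective limit is (0xfffff << 12) | 0xfff = 0xffffffff */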
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
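/* The hardware TSS stores one SS:ESP pair per privilege level near the
   start of the task segment; index (dpl * 4 + 2) << shift below selects
   the entry, with shift = 1 for a 32-bit TSS and 0 for a 16-bit one. */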
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
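/* 32-bit TSS layout as read and written below (16-bit offsets differ):
   CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the eight GP registers from
   0x28, the six segment selectors from 0x48, the LDT selector at 0x60
   and the trap / I/O-map word at 0x64. */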
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
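/* the D/B bit of the stack-segment descriptor selects a 32-bit (ESP)
   or a 16-bit (SP) stack pointer */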
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
/* XXX: add a is_user flag to have proper security support */
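/* The push/pop helpers below adjust the software copy of the stack
   pointer and mask it with the stack-segment size before adding the
   segment base, so 16-bit stacks wrap at 64 KiB as on real hardware. */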
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            ESP = (esp & mask) | (ESP & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;
#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
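/* In long mode the TSS no longer holds segmented stacks: it stores flat
   64-bit RSP values for rings 0-2 starting at offset 4, each entry 8
   bytes wide, which is what the index computation below walks. */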
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xf; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        esp = ESP & ~0xf; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
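/* SYSCALL/SYSRET derive their selectors from the STAR MSR: bits 47:32
   give the kernel CS (with kernel SS = CS + 8), and bits 63:48 give the
   base selector used when returning to user mode, as the shifts above
   and below show. */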
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
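/* In real mode the IDT is the classic interrupt vector table: four
   bytes per vector, offset word first and segment word second, which is
   why the bound check below uses intno * 4 + 3. */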
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}
/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
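/* The indirection is purely a workaround: routing the division through
   a separate, non-inlined function makes the affected compiler emit a
   plain call to the libgcc 64-bit division helpers instead of
   miscompiling the inline use. */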
void helper_divl_EAX_T0(void)
{
    unsigned int den, q, r;
    uint64_t num;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
void helper_idivl_EAX_T0(void)
{
    int den, q, r;
    int64_t num;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
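/* CMPXCHG8B semantics as implemented above: compare the 64-bit value at
   [A0] with EDX:EAX; on a match store ECX:EBX and set ZF, otherwise
   load the memory value into EDX:EAX and clear ZF. */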
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
2288 #if defined(CONFIG_USER_ONLY)
2289 void helper_wrmsr(void)
2293 void helper_rdmsr(void)
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

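/* NOTE: the EFER case only lets the guest toggle bits whose backing
   feature is advertised in cpuid_ext2_features; all other bits are
   preserved.  E.g. with only SYSCALL and NX available:
       update_mask = MSR_EFER_SCE | MSR_EFER_NXE;
       env->efer   = (env->efer & ~update_mask) | (val & update_mask);
   so a guest write cannot set LME on a CPU model without long mode. */
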
void helper_rdmsr(void)
{
    uint64_t val;

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

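/* NOTE: LSL (and LAR below) report success through ZF only: CC_SRC is
   loaded with the current eflags with CC_Z forced to 1 on success or
   cleared on the fail path, and the result (the segment limit here,
   the access rights for LAR) is returned in T1. */
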
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);	 /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

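/* NOTE: when the reduction is complete (expdif < 53), the three low
   bits of the integer quotient are published in the status word as
   the architecture requires for argument reduction: q2->C0, q1->C1,
   q0->C3.  E.g. for q = 5 (101b): C0 = 1, C1 = 0, C3 = 1, with C2 = 0
   meaning "done"; otherwise C2 = 1 tells the caller to run the
   instruction again on the partial remainder. */
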
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

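/* NOTE: the constants above encode the FXAM condition codes
   (C3,C2,C0) = fpus bits (14,10,8), with C1 = bit 9 holding the sign:
       0x100  NaN       (C0)
       0x400  normal    (C2)
       0x500  infinity  (C2|C0)
       0x4000 zero      (C3)
       0x4400 denormal  (C3|C2) */
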
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

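/* NOTE: layout of the 32-bit protected-mode environment image written
   above (the 16-bit form packs the same fields at 2-byte stride):
       +0  control word      +12 fpip
       +4  status word       +16 fpcs
       +8  tag word          +20 fpoo
                             +24 fpos
   The instruction/operand pointers are not tracked and are stored as
   zero. */
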
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

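/* NOTE: without USE_X86LDOUBLE the 80-bit memory format is synthesized
   from an IEEE double: the 52-bit mantissa is shifted up by 11 and the
   integer bit (1LL << 63) is made explicit, while the exponent is
   re-biased from the double bias (EXPBIAS) to the extended bias 16383.
   cpu_set_fp80() is the approximate inverse and simply drops the 11
   low mantissa bits, so the round trip loses precision by design. */
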
#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint32_t a0, a1, b0, b1;
    uint64_t v;

    a0 = a;
    a1 = a >> 32;

    b0 = b;
    b1 = b >> 32;

    v = (uint64_t)a0 * (uint64_t)b0;
    *plow = v;
    *phigh = 0;

    v = (uint64_t)a0 * (uint64_t)b1;
    add128(plow, phigh, v << 32, v >> 32);

    v = (uint64_t)a1 * (uint64_t)b0;
    add128(plow, phigh, v << 32, v >> 32);

    v = (uint64_t)a1 * (uint64_t)b1;
    *phigh += v;
#ifdef DEBUG_MULDIV
    printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
           a, b, *phigh, *plow);
#endif
}

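/* NOTE: mul64() is the schoolbook decomposition with 32-bit digits:
       a * b = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0
   Each 32x32->64 cross product v is split as (v << 32, v >> 32) and
   accumulated into the 128-bit (plow, phigh) pair through add128(),
   which propagates the carry out of the low half. */
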
static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    int sa, sb;
    sa = (a < 0);
    if (sa)
        a = -a;
    sb = (b < 0);
    if (sb)
        b = -b;
    mul64(plow, phigh, a, b);
    if (sa ^ sb) {
        neg128(plow, phigh);
    }
}

/* XXX: overflow support */
static void div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            a1 = (a1 << 1) | (a0 >> 63);
            if (a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016llx%016llx / 0x%016llx: q=0x%016llx r=0x%016llx\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
}

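/* NOTE: the slow path of div64() is plain restoring division: each of
   the 64 iterations shifts the remainder/quotient pair (a1, a0) left
   by one bit, subtracts b from the remainder when it fits, and shifts
   the resulting quotient bit qb into a0.  After 64 steps a0 holds the
   quotient and a1 the remainder. */
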
static void idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    div64(plow, phigh, b);
    if (sa ^ sb)
        *plow = - *plow;
    if (sa)
        *phigh = - *phigh;
}

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    div64(&r0, &r1, T0);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    idiv64(&r0, &r1, T0);
    EAX = r0;
    EDX = r1;
}

#endif

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

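/* NOTE: the RC field (fpuc bits 10-11) maps to the four softfloat
   rounding modes (nearest-even, down, up, chop), and when extended
   precision is available the PC field (fpuc bits 8-9) additionally
   selects 32/64/80-bit rounding precision, so guest code that runs
   the FPU in single or double precision sees correctly rounded
   results. */
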
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(EXCP0E_PAGE, env->error_code);
        else
            raise_exception_err_norestore(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}
#endif