 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
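
/* Illustrative sketch (not part of the original source): the table above
   just records CC_P for every byte value with an even number of set bits.
   The hypothetical helper below, kept under #if 0, shows how such a table
   could be generated at startup instead of being written out by hand. */
#if 0
static void build_parity_table(uint8_t *table)
{
    int v, bit, count;

    for (v = 0; v < 256; v++) {
        count = 0;
        for (bit = 0; bit < 8; bit++)
            count += (v >> bit) & 1;
        /* PF is set when the byte has an even number of one bits */
        table[v] = (count & 1) ? 0 : CC_P;
    }
}
#endif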
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
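
/* Worked example (illustrative, not in the original source): for the common
   flat code descriptor e1 = 0x0000ffff, e2 = 0x00cf9a00, get_seg_base()
   returns 0 and get_seg_limit() returns the raw limit 0xfffff scaled by the
   G bit to 0xffffffff, i.e. a base-0, 4 GiB segment. */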
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set the busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */

    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
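
/* Worked example (illustrative, not in the original source): for a one-byte
   access to port 0x3f8, check_io() adds 0x3f8 >> 3 = 127 to the I/O bitmap
   base read from TSS offset 0x66, loads two bytes from there, shifts them
   right by 0x3f8 & 7 = 0 and tests mask = (1 << 1) - 1 = 1; the access is
   allowed only if that bit is clear. */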
void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
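
/* Minimal usage sketch (illustrative, not part of the original source):
   pushing a selector on the guest stack with the macros above.  The local
   names follow the conventions of the helpers below; the whole block is a
   hypothetical example kept under #if 0. */
#if 0
{
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    uint32_t sp = ESP;

    PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); /* decrement sp, then store */
    SET_ESP(sp, sp_mask);                              /* commit the new stack pointer */
}
#endif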
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
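
/* Worked example (illustrative, not in the original source): in the 64-bit
   TSS, RSP0..RSP2 sit at offsets 4, 12 and 20 and IST1..IST7 at offsets
   36..84, so index = 8 * level + 4 maps level 0..2 to RSPn and the callers'
   "ist + 3" (levels 4..10) to ISTn. */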
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
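
/* Worked examples (illustrative, not in the original source): a #GP (13)
   raised while delivering a #NP (11) becomes a double fault, since both are
   contributory (vector 0 or 10..13); a #GP raised while delivering a #PF
   (14) is also promoted to #DF; and any exception raised while delivering a
   #DF makes cpu_abort() above report a triple fault. */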
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}
/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
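
/* Descriptive note (illustrative, not in the original source): CMPXCHG8B
   compares EDX:EAX with the 64-bit value at A0; on a match it stores
   ECX:EBX there and sets ZF, otherwise it loads the memory value into
   EDX:EAX and clears ZF, which is why the helper starts from the lazily
   computed eflags and only adjusts CC_Z before writing CC_SRC back. */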
void helper_single_step()
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}
void helper_cpuid(void)
{
    uint32_t index;

    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;
    if (!data64) {
        /* 16 bit */
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
2112 /* protected mode call */
2113 void helper_lcall_protected_T0_T1(int shift
, int next_eip_addend
)
2115 int new_cs
, new_stack
, i
;
2116 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2117 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2118 uint32_t val
, limit
, old_sp_mask
;
2119 target_ulong ssp
, old_ssp
, next_eip
, new_eip
;
2123 next_eip
= env
->eip
+ next_eip_addend
;
2125 if (loglevel
& CPU_LOG_PCALL
) {
2126 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2127 new_cs
, (uint32_t)new_eip
, shift
);
2128 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2131 if ((new_cs
& 0xfffc) == 0)
2132 raise_exception_err(EXCP0D_GPF
, 0);
2133 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2134 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2135 cpl
= env
->hflags
& HF_CPL_MASK
;
2137 if (loglevel
& CPU_LOG_PCALL
) {
2138 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2141 if (e2
& DESC_S_MASK
) {
2142 if (!(e2
& DESC_CS_MASK
))
2143 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2144 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2145 if (e2
& DESC_C_MASK
) {
2146 /* conforming code segment */
2148 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2150 /* non conforming code segment */
2153 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2155 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2157 if (!(e2
& DESC_P_MASK
))
2158 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2160 #ifdef TARGET_X86_64
2161 /* XXX: check 16/32 bit cases in long mode */
2166 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2167 PUSHQ(rsp
, next_eip
);
2168 /* from this point, not restartable */
2170 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2171 get_seg_base(e1
, e2
),
2172 get_seg_limit(e1
, e2
), e2
);
2178 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2179 ssp
= env
->segs
[R_SS
].base
;
2181 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2182 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2184 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2185 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2188 limit
= get_seg_limit(e1
, e2
);
2189 if (new_eip
> limit
)
2190 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2191 /* from this point, not restartable */
2192 SET_ESP(sp
, sp_mask
);
2193 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2194 get_seg_base(e1
, e2
), limit
, e2
);
2198 /* check gate type */
2199 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2200 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2203 case 1: /* available 286 TSS */
2204 case 9: /* available 386 TSS */
2205 case 5: /* task gate */
2206 if (dpl
< cpl
|| dpl
< rpl
)
2207 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2208 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2209 CC_OP
= CC_OP_EFLAGS
;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            //            push_size = ((param_count * 2) + 8) << shift;
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
            /* to same privilege */
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);

        /* from this point, not restartable */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   get_seg_limit(ss_e1, ss_e2),
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
/* real and vm86 mode iret */
void helper_iret_real(int shift)
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    ssp = env->segs[R_SS].base;
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        POPL(ssp, sp, sp_mask, new_eflags);
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
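
/* Note: on a return to a less privileged level, data segment registers whose
   descriptor DPL is below the new CPL must not stay usable; validate_seg()
   below clears the cached descriptor in that case (FS/GS with a null selector
   are deliberately left alone on x86_64, see the comment in the function). */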
static inline void validate_seg(int seg_reg, int cpl)
    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
            POPQ(sp, new_eflags);
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
            POPW(ssp, sp, sp_mask, new_eflags);
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        /* return to different privilege level */
#ifdef TARGET_X86_64
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
                raise_exception_err(EXCP0D_GPF, 0);
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        cpu_x86_set_cpl(env, rpl);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = get_sp_mask(ss_e2);
        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);
    SET_ESP(sp, sp_mask);
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
            eflags_mask |= IF_MASK;
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);
    env->eip = new_eip & 0xffff;
void helper_iret_protected(int shift, int next_eip)
    int tss_selector, type;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
        helper_ret_protected(shift, 1, 0);
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;

void helper_lret_protected(int shift, int addend)
    helper_ret_protected(shift, 0, addend);
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
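
/* Note: SYSENTER loads flat CS/SS descriptors derived from the
   MSR_IA32_SYSENTER_CS selector (CS = sysenter_cs, SS = sysenter_cs + 8),
   switches to CPL 0 and jumps to sysenter_eip with ESP = sysenter_esp; a
   zero sysenter_cs means the MSRs were never set up, hence the #GP below. */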
void helper_sysenter(void)
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
void helper_sysexit(void)
    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
void helper_movl_crN_T0(int reg)
#if !defined(CONFIG_USER_ONLY)
        cpu_x86_update_cr0(env, T0);
        cpu_x86_update_cr3(env, T0);
        cpu_x86_update_cr4(env, T0);
        cpu_set_apic_tpr(env, T0);

void helper_movl_drN_T0(int reg)

void helper_invlpg(target_ulong addr)
    cpu_x86_flush_tlb(env, addr);
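
/* Note: when CR4.TSD is set, RDTSC is privileged; executing it at CPL > 0
   must raise #GP(0), which is what the check below implements. */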
void helper_rdtsc(void)
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)

void helper_rdmsr(void)
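
/* Note: WRMSR takes the MSR index in ECX and the 64-bit value in EDX:EAX;
   RDMSR is the converse and returns the value in EDX:EAX. */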
void helper_wrmsr(void)
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
            uint64_t update_mask;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                        (val & update_mask);
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
#ifdef TARGET_X86_64
        env->segs[R_FS].base = val;
        env->segs[R_GS].base = val;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        /* XXX: exception ? */
void helper_rdmsr(void)
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
#ifdef TARGET_X86_64
        val = env->segs[R_FS].base;
        val = env->segs[R_GS].base;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        /* XXX: exception ? */
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
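
/* Note: LSL and LAR report success only through ZF (computed into CC_SRC
   here); on any selector, type or privilege check failure the destination
   is left unchanged and ZF is cleared. */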
void helper_lsl(void)
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl) {
            CC_SRC = eflags & ~CC_Z;
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
void helper_lar(void)
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
    if (load_segment(&e1, &e2, selector) != 0)
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl) {
            CC_SRC = eflags & ~CC_Z;
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
void helper_verr(void)
    unsigned int selector;
    uint32_t e1, e2, eflags;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
    if (load_segment(&e1, &e2, selector) != 0)
    if (!(e2 & DESC_S_MASK))
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
        if (dpl < cpl || dpl < rpl) {
            CC_SRC = eflags & ~CC_Z;
    CC_SRC = eflags | CC_Z;
void helper_verw(void)
    unsigned int selector;
    uint32_t e1, e2, eflags;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
    if (load_segment(&e1, &e2, selector) != 0)
    if (!(e2 & DESC_S_MASK))
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (dpl < cpl || dpl < rpl)
        if (!(e2 & DESC_W_MASK)) {
            CC_SRC = eflags & ~CC_Z;
    CC_SRC = eflags | CC_Z;
void helper_fldt_ST0_A0(void)
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */

void helper_fstt_ST0_A0(void)
    helper_fstt(ST0, A0);

void fpu_set_exception(int mask)
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
        fpu_set_exception(FPUS_ZE);

void fpu_raise_exception(void)
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
#if !defined(CONFIG_USER_ONLY)
void helper_fbld_ST0_A0(void)
    for(i = 8; i >= 0; i--) {
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    if (ldub(A0 + 9) & 0x80)

void helper_fbst_ST0_A0(void)
    target_ulong mem_ref, mem_end;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_end = mem_ref + 9;
    while (mem_ref < mem_end) {
        v = ((v / 10) << 4) | (v % 10);
    while (mem_ref < mem_end) {
void helper_f2xm1(void)
    ST0 = pow(2.0,ST0) - 1.0;

void helper_fyl2x(void)
    CPU86_LDouble fptemp;
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        env->fpus &= (~0x4700);

void helper_fptan(void)
    CPU86_LDouble fptemp;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */

void helper_fpatan(void)
    CPU86_LDouble fptemp, fpsrcop;
    ST1 = atan2(fpsrcop,fptemp);

void helper_fxtract(void)
    CPU86_LDoubleU temp;
    unsigned int expdif;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
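
/* Note: FPREM1/FPREM compute a partial remainder.  When the exponent
   difference between ST0 and ST1 is too large, the reduction is done in
   chunks and C2 is set to tell the guest to repeat the instruction;
   otherwise C0, C3 and C1 receive the low three bits of the quotient, as
   the comments in the loops below spell out. */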
void helper_fprem1(void)
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    fpsrcop1.d = fpsrcop;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        ST0 = fpsrcop - fptemp * dblq;
        /* convert dblq to q by truncating towards zero */
            q = (signed long long int)(-dblq);
            q = (signed long long int)dblq;
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
void helper_fprem(void)
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
        /* convert dblq to q by truncating towards zero */
            q = (signed long long int)(-dblq);
            q = (signed long long int)dblq;
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
void helper_fyl2xp1(void)
    CPU86_LDouble fptemp;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        env->fpus &= (~0x4700);

void helper_fsqrt(void)
    CPU86_LDouble fptemp;
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */

void helper_fsincos(void)
    CPU86_LDouble fptemp;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */

void helper_frndint(void)
    ST0 = floatx_round_to_int(ST0, &env->fp_status);

void helper_fscale(void)
    ST0 = ldexp (ST0, (int)(ST1));

void helper_fsin(void)
    CPU86_LDouble fptemp;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */

void helper_fcos(void)
    CPU86_LDouble fptemp;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
void helper_fxam_ST0(void)
    CPU86_LDoubleU temp;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x200; /* C1 <-- 1 */
    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /*Infinity*/;
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
            env->fpus |= 0x4400 /*Denormal*/;
void helper_fstenv(target_ulong ptr, int data32)
    int fpus, fptag, exp, i;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for (i=7; i>=0; i--) {
        if (env->fptags[i]) {
            tmp.d = env->fpregs[i].d;
            if (exp == 0 && mant == 0) {
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
                /* NaNs, infinity, denormal */
        stl(ptr, env->fpuc);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
        stw(ptr, env->fpuc);
        stw(ptr + 4, fptag);
void helper_fldenv(target_ulong ptr, int data32)
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
void helper_fsave(target_ulong ptr, int data32)
    helper_fstenv(ptr, data32);
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        helper_fstt(tmp, ptr);

void helper_frstor(target_ulong ptr, int data32)
    helper_fldenv(ptr, data32);
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
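
/* Note: FXSAVE/FXRSTOR operate on the 512-byte FXSAVE area: x87 control and
   status words plus an inverted 8-bit tag summary at the start, the FP/MMX
   registers at fixed slots, and, when CR4.OSFXSR is set, MXCSR and the XMM
   registers; data64 selects the 64-bit form with twice as many XMM slots. */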
void helper_fxsave(target_ulong ptr, int data64)
    int fpus, fptag, i, nb_xmm_regs;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    stw(ptr, env->fpuc);
    stw(ptr + 4, fptag ^ 0xff);
    for(i = 0;i < 8; i++) {
        helper_fstt(tmp, addr);
    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
void helper_fxrstor(target_ulong ptr, int data64)
    int i, fpus, fptag, nb_xmm_regs;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        nb_xmm_regs = 8 << data64;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
    CPU86_LDoubleU temp;

    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
    CPU86_LDoubleU temp;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.ll = ll | ((uint64_t)e << 52);

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
    CPU86_LDoubleU temp;

    *pmant = temp.l.lower;
    *pexp = temp.l.upper;

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)

static void neg128(uint64_t *plow, uint64_t *phigh)
    add128(plow, phigh, 1, 0);
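
/* Note: div64() divides the 128-bit value in *phigh:*plow by b with a simple
   shift-and-subtract loop, leaving the quotient in *plow and the remainder in
   *phigh; idiv64() wraps it with sign handling for the signed IDIV case. */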
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
    uint64_t q, r, a1, a0;

    /* XXX: use a better algorithm */
    for(i = 0; i < 64; i++) {
        a1 = (a1 << 1) | (a0 >> 63);
        if (ab || a1 >= b) {
        a0 = (a0 << 1) | qb;
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, a0, a1);

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
    sa = ((int64_t)*phigh < 0);
        neg128(plow, phigh);
    if (div64(plow, phigh, b) != 0)
        if (*plow > (1ULL << 63))
        if (*plow >= (1ULL << 63))
void helper_mulq_EAX_T0(void)
    mulu64(&r1, &r0, EAX, T0);

void helper_imulq_EAX_T0(void)
    muls64(&r1, &r0, EAX, T0);
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));

void helper_imulq_T0_T1(void)
    muls64(&r1, &r0, T0, T1);
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));

void helper_divq_EAX_T0(void)
        raise_exception(EXCP00_DIVZ);
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);

void helper_idivq_EAX_T0(void)
        raise_exception(EXCP00_DIVZ);
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);

void helper_bswapq_T0(void)
void helper_hlt(void)
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;

void helper_monitor(void)
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */

void helper_mwait(void)
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may

float approx_rsqrt(float a)
    return 1.0 / sqrt(a);

float approx_rcp(float a)
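
/* Note: update_fp_status() maps the rounding-control field of the x87
   control word onto the softfloat rounding mode, and the precision-control
   field onto the floatx80 rounding precision used for emulation. */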
void update_fp_status(void)
    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
        rnd_type = float_round_nearest_even;
        rnd_type = float_round_down;
        rnd_type = float_round_up;
        rnd_type = float_round_to_zero;
    set_float_rounding_mode(rnd_type, &env->fp_status);
    switch((env->fpuc >> 8) & 3) {
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#include "softmmu_template.h"
#include "softmmu_template.h"
#include "softmmu_template.h"
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
    TranslationBlock *tb;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc, NULL);
        raise_exception_err(env->exception_index, env->error_code);
        raise_exception_err_norestore(env->exception_index, env->error_code);
/* Secure Virtual Machine helpers */

void helper_stgi(void)
    env->hflags |= HF_GIF_MASK;

void helper_clgi(void)
    env->hflags &= ~HF_GIF_MASK;

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
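
/* Note: the VMCB stores segment attributes in a packed 12-bit format; the
   two helpers below translate between that layout and the descriptor flag
   layout kept in the QEMU segment cache. */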
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
    return    ((vmcb_attrib & 0x00ff) << 8)     /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)    /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)        /* Base 23-16 */
            | (vmcb_base & 0xff000000)          /* Base 31-24 */
            | (vmcb_limit & 0xf0000);           /* Limit 19-16 */

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
    return    ((cpu_attrib >> 8) & 0xff)        /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);  /* AVL, L, DB, G */

extern uint8_t *phys_ram_base;
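
/* Note: VMRUN first snapshots the host state into the hsave area, then loads
   the guest state (control registers, segments, RIP/RSP/RAX and the intercept
   bitmaps) from the VMCB addressed by the operand before resuming the guest;
   pending event injection is handled at the end of the helper. */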
void helper_vmrun(target_ulong addr)
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
    /* load the interception bitmaps so we do not need to access the
    /* We shift all the intercept bits so we can OR them with the TB
    env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;

    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
void helper_vmmcall(void)
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
void helper_vmload(target_ulong addr)
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
void helper_vmsave(target_ulong addr)
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
void helper_skinit(void)
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");

void helper_invlpga(void)
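
/* Note: svm_check_intercept_param() is called for interceptable operations;
   it tests the relevant bit in the intercept bitmaps loaded at VMRUN (and,
   for IOIO/MSR accesses, the permission bitmaps in guest memory) and triggers
   a #VMEXIT with the given exit code and exit_info_1 when the bit is set. */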
int svm_check_intercept_param(uint32_t type, uint64_t param)
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
                vmexit(type, param);
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                vmexit(type, param);
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
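
/* Note: vmexit() is the inverse of VMRUN: it writes the current guest state
   plus the exit code and exit_info back into the VMCB, then restores the host
   state that VMRUN saved in the hsave area and clears any pending exception so
   execution resumes in the host. */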
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;

    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;