4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #define CPU_NO_GLOBAL_REGS
23 #include "host-utils.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(env) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(env) do { } while (0)
39 #define raise_exception_err(a, b)\
41 qemu_log("raise_exception line=%d\n", __LINE__);\
42 (raise_exception_err)(a, b);\
46 static const uint8_t parity_table
[256] = {
47 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
48 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
51 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
52 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
58 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
59 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
60 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
63 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
64 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
67 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
68 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
69 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
70 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
71 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
72 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
73 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
74 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
75 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
76 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
77 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
78 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
82 static const uint8_t rclw_table
[32] = {
83 0, 1, 2, 3, 4, 5, 6, 7,
84 8, 9,10,11,12,13,14,15,
85 16, 0, 1, 2, 3, 4, 5, 6,
86 7, 8, 9,10,11,12,13,14,
90 static const uint8_t rclb_table
[32] = {
91 0, 1, 2, 3, 4, 5, 6, 7,
92 8, 0, 1, 2, 3, 4, 5, 6,
93 7, 8, 0, 1, 2, 3, 4, 5,
94 6, 7, 8, 0, 1, 2, 3, 4,
97 static const CPU86_LDouble f15rk
[7] =
99 0.00000000000000000000L,
100 1.00000000000000000000L,
101 3.14159265358979323851L, /*pi*/
102 0.30102999566398119523L, /*lg2*/
103 0.69314718055994530943L, /*ln2*/
104 1.44269504088896340739L, /*l2e*/
105 3.32192809488736234781L, /*l2t*/
108 /* broken thread support */
110 static spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
112 void helper_lock(void)
114 spin_lock(&global_cpu_lock
);
117 void helper_unlock(void)
119 spin_unlock(&global_cpu_lock
);
122 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
124 load_eflags(t0
, update_mask
);
127 target_ulong
helper_read_eflags(void)
130 eflags
= helper_cc_compute_all(CC_OP
);
131 eflags
|= (DF
& DF_MASK
);
132 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
136 /* return non zero if error */
137 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
148 index
= selector
& ~7;
149 if ((index
+ 7) > dt
->limit
)
151 ptr
= dt
->base
+ index
;
152 *e1_ptr
= ldl_kernel(ptr
);
153 *e2_ptr
= ldl_kernel(ptr
+ 4);
157 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
160 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
161 if (e2
& DESC_G_MASK
)
162 limit
= (limit
<< 12) | 0xfff;
166 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
168 return ((e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000));
171 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
173 sc
->base
= get_seg_base(e1
, e2
);
174 sc
->limit
= get_seg_limit(e1
, e2
);
178 /* init the segment cache in vm86 mode. */
179 static inline void load_seg_vm(int seg
, int selector
)
182 cpu_x86_load_seg_cache(env
, seg
, selector
,
183 (selector
<< 4), 0xffff, 0);
186 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
187 uint32_t *esp_ptr
, int dpl
)
189 int type
, index
, shift
;
194 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
195 for(i
=0;i
<env
->tr
.limit
;i
++) {
196 printf("%02x ", env
->tr
.base
[i
]);
197 if ((i
& 7) == 7) printf("\n");
203 if (!(env
->tr
.flags
& DESC_P_MASK
))
204 cpu_abort(env
, "invalid tss");
205 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
207 cpu_abort(env
, "invalid tss type");
209 index
= (dpl
* 4 + 2) << shift
;
210 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
211 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
213 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
214 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
216 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
217 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
221 /* XXX: merge with load_seg() */
222 static void tss_load_seg(int seg_reg
, int selector
)
227 if ((selector
& 0xfffc) != 0) {
228 if (load_segment(&e1
, &e2
, selector
) != 0)
229 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
230 if (!(e2
& DESC_S_MASK
))
231 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
233 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
234 cpl
= env
->hflags
& HF_CPL_MASK
;
235 if (seg_reg
== R_CS
) {
236 if (!(e2
& DESC_CS_MASK
))
237 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
238 /* XXX: is it correct ? */
240 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
241 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
242 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
243 } else if (seg_reg
== R_SS
) {
244 /* SS must be writable data */
245 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
246 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
247 if (dpl
!= cpl
|| dpl
!= rpl
)
248 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
250 /* not readable code */
251 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
252 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
253 /* if data or non conforming code, checks the rights */
254 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
255 if (dpl
< cpl
|| dpl
< rpl
)
256 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
259 if (!(e2
& DESC_P_MASK
))
260 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
261 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
262 get_seg_base(e1
, e2
),
263 get_seg_limit(e1
, e2
),
266 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
267 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
271 #define SWITCH_TSS_JMP 0
272 #define SWITCH_TSS_IRET 1
273 #define SWITCH_TSS_CALL 2
275 /* XXX: restore CPU state in registers (PowerPC case) */
276 static void switch_tss(int tss_selector
,
277 uint32_t e1
, uint32_t e2
, int source
,
280 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
281 target_ulong tss_base
;
282 uint32_t new_regs
[8], new_segs
[6];
283 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
284 uint32_t old_eflags
, eflags_mask
;
289 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
290 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
292 /* if task gate, we read the TSS segment and we load it */
294 if (!(e2
& DESC_P_MASK
))
295 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
296 tss_selector
= e1
>> 16;
297 if (tss_selector
& 4)
298 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
299 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
300 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
301 if (e2
& DESC_S_MASK
)
302 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
303 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
305 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
308 if (!(e2
& DESC_P_MASK
))
309 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
315 tss_limit
= get_seg_limit(e1
, e2
);
316 tss_base
= get_seg_base(e1
, e2
);
317 if ((tss_selector
& 4) != 0 ||
318 tss_limit
< tss_limit_max
)
319 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
320 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
322 old_tss_limit_max
= 103;
324 old_tss_limit_max
= 43;
326 /* read all the registers from the new TSS */
329 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
330 new_eip
= ldl_kernel(tss_base
+ 0x20);
331 new_eflags
= ldl_kernel(tss_base
+ 0x24);
332 for(i
= 0; i
< 8; i
++)
333 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
334 for(i
= 0; i
< 6; i
++)
335 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
336 new_ldt
= lduw_kernel(tss_base
+ 0x60);
337 new_trap
= ldl_kernel(tss_base
+ 0x64);
341 new_eip
= lduw_kernel(tss_base
+ 0x0e);
342 new_eflags
= lduw_kernel(tss_base
+ 0x10);
343 for(i
= 0; i
< 8; i
++)
344 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
345 for(i
= 0; i
< 4; i
++)
346 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
347 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
353 /* NOTE: we must avoid memory exceptions during the task switch,
354 so we make dummy accesses before */
355 /* XXX: it can still fail in some cases, so a bigger hack is
356 necessary to valid the TLB after having done the accesses */
358 v1
= ldub_kernel(env
->tr
.base
);
359 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
360 stb_kernel(env
->tr
.base
, v1
);
361 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
363 /* clear busy bit (it is restartable) */
364 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
367 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
368 e2
= ldl_kernel(ptr
+ 4);
369 e2
&= ~DESC_TSS_BUSY_MASK
;
370 stl_kernel(ptr
+ 4, e2
);
372 old_eflags
= compute_eflags();
373 if (source
== SWITCH_TSS_IRET
)
374 old_eflags
&= ~NT_MASK
;
376 /* save the current state in the old TSS */
379 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
380 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
381 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
382 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
383 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
384 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
385 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
386 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
387 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
388 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
389 for(i
= 0; i
< 6; i
++)
390 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
393 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
394 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
395 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
396 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
397 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
398 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
399 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
400 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
401 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
402 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
403 for(i
= 0; i
< 4; i
++)
404 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
407 /* now if an exception occurs, it will occurs in the next task
410 if (source
== SWITCH_TSS_CALL
) {
411 stw_kernel(tss_base
, env
->tr
.selector
);
412 new_eflags
|= NT_MASK
;
416 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
419 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
420 e2
= ldl_kernel(ptr
+ 4);
421 e2
|= DESC_TSS_BUSY_MASK
;
422 stl_kernel(ptr
+ 4, e2
);
425 /* set the new CPU state */
426 /* from this point, any exception which occurs can give problems */
427 env
->cr
[0] |= CR0_TS_MASK
;
428 env
->hflags
|= HF_TS_MASK
;
429 env
->tr
.selector
= tss_selector
;
430 env
->tr
.base
= tss_base
;
431 env
->tr
.limit
= tss_limit
;
432 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
434 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
435 cpu_x86_update_cr3(env
, new_cr3
);
438 /* load all registers without an exception, then reload them with
439 possible exception */
441 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
442 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
444 eflags_mask
&= 0xffff;
445 load_eflags(new_eflags
, eflags_mask
);
446 /* XXX: what to do in 16 bit case ? */
455 if (new_eflags
& VM_MASK
) {
456 for(i
= 0; i
< 6; i
++)
457 load_seg_vm(i
, new_segs
[i
]);
458 /* in vm86, CPL is always 3 */
459 cpu_x86_set_cpl(env
, 3);
461 /* CPL is set the RPL of CS */
462 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
463 /* first just selectors as the rest may trigger exceptions */
464 for(i
= 0; i
< 6; i
++)
465 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
468 env
->ldt
.selector
= new_ldt
& ~4;
475 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
477 if ((new_ldt
& 0xfffc) != 0) {
479 index
= new_ldt
& ~7;
480 if ((index
+ 7) > dt
->limit
)
481 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
482 ptr
= dt
->base
+ index
;
483 e1
= ldl_kernel(ptr
);
484 e2
= ldl_kernel(ptr
+ 4);
485 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
486 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
487 if (!(e2
& DESC_P_MASK
))
488 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
489 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
492 /* load the segments */
493 if (!(new_eflags
& VM_MASK
)) {
494 tss_load_seg(R_CS
, new_segs
[R_CS
]);
495 tss_load_seg(R_SS
, new_segs
[R_SS
]);
496 tss_load_seg(R_ES
, new_segs
[R_ES
]);
497 tss_load_seg(R_DS
, new_segs
[R_DS
]);
498 tss_load_seg(R_FS
, new_segs
[R_FS
]);
499 tss_load_seg(R_GS
, new_segs
[R_GS
]);
502 /* check that EIP is in the CS segment limits */
503 if (new_eip
> env
->segs
[R_CS
].limit
) {
504 /* XXX: different exception if CALL ? */
505 raise_exception_err(EXCP0D_GPF
, 0);
508 #ifndef CONFIG_USER_ONLY
509 /* reset local breakpoints */
510 if (env
->dr
[7] & 0x55) {
511 for (i
= 0; i
< 4; i
++) {
512 if (hw_breakpoint_enabled(env
->dr
[7], i
) == 0x1)
513 hw_breakpoint_remove(env
, i
);
520 /* check if Port I/O is allowed in TSS */
521 static inline void check_io(int addr
, int size
)
523 int io_offset
, val
, mask
;
525 /* TSS must be a valid 32 bit one */
526 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
527 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
530 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
531 io_offset
+= (addr
>> 3);
532 /* Note: the check needs two bytes */
533 if ((io_offset
+ 1) > env
->tr
.limit
)
535 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
537 mask
= (1 << size
) - 1;
538 /* all bits must be zero to allow the I/O */
539 if ((val
& mask
) != 0) {
541 raise_exception_err(EXCP0D_GPF
, 0);
545 void helper_check_iob(uint32_t t0
)
550 void helper_check_iow(uint32_t t0
)
555 void helper_check_iol(uint32_t t0
)
560 void helper_outb(uint32_t port
, uint32_t data
)
562 cpu_outb(env
, port
, data
& 0xff);
565 target_ulong
helper_inb(uint32_t port
)
567 return cpu_inb(env
, port
);
570 void helper_outw(uint32_t port
, uint32_t data
)
572 cpu_outw(env
, port
, data
& 0xffff);
575 target_ulong
helper_inw(uint32_t port
)
577 return cpu_inw(env
, port
);
580 void helper_outl(uint32_t port
, uint32_t data
)
582 cpu_outl(env
, port
, data
);
585 target_ulong
helper_inl(uint32_t port
)
587 return cpu_inl(env
, port
);
590 static inline unsigned int get_sp_mask(unsigned int e2
)
592 if (e2
& DESC_B_MASK
)
599 #define SET_ESP(val, sp_mask)\
601 if ((sp_mask) == 0xffff)\
602 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
603 else if ((sp_mask) == 0xffffffffLL)\
604 ESP = (uint32_t)(val);\
609 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
612 /* in 64-bit machines, this can overflow. So this segment addition macro
613 * can be used to trim the value to 32-bit whenever needed */
614 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
616 /* XXX: add a is_user flag to have proper security support */
617 #define PUSHW(ssp, sp, sp_mask, val)\
620 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
623 #define PUSHL(ssp, sp, sp_mask, val)\
626 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
629 #define POPW(ssp, sp, sp_mask, val)\
631 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
635 #define POPL(ssp, sp, sp_mask, val)\
637 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
641 /* protected mode interrupt */
642 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
643 unsigned int next_eip
, int is_hw
)
646 target_ulong ptr
, ssp
;
647 int type
, dpl
, selector
, ss_dpl
, cpl
;
648 int has_error_code
, new_stack
, shift
;
649 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
650 uint32_t old_eip
, sp_mask
;
653 if (!is_int
&& !is_hw
) {
672 if (intno
* 8 + 7 > dt
->limit
)
673 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
674 ptr
= dt
->base
+ intno
* 8;
675 e1
= ldl_kernel(ptr
);
676 e2
= ldl_kernel(ptr
+ 4);
677 /* check gate type */
678 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
680 case 5: /* task gate */
681 /* must do that check here to return the correct error code */
682 if (!(e2
& DESC_P_MASK
))
683 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
684 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
685 if (has_error_code
) {
688 /* push the error code */
689 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
691 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
695 esp
= (ESP
- (2 << shift
)) & mask
;
696 ssp
= env
->segs
[R_SS
].base
+ esp
;
698 stl_kernel(ssp
, error_code
);
700 stw_kernel(ssp
, error_code
);
704 case 6: /* 286 interrupt gate */
705 case 7: /* 286 trap gate */
706 case 14: /* 386 interrupt gate */
707 case 15: /* 386 trap gate */
710 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
713 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
714 cpl
= env
->hflags
& HF_CPL_MASK
;
715 /* check privilege if software int */
716 if (is_int
&& dpl
< cpl
)
717 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
718 /* check valid bit */
719 if (!(e2
& DESC_P_MASK
))
720 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
722 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
723 if ((selector
& 0xfffc) == 0)
724 raise_exception_err(EXCP0D_GPF
, 0);
726 if (load_segment(&e1
, &e2
, selector
) != 0)
727 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
728 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
729 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
730 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
732 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
733 if (!(e2
& DESC_P_MASK
))
734 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
735 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
736 /* to inner privilege */
737 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
738 if ((ss
& 0xfffc) == 0)
739 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
741 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
742 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
743 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
744 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
746 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
747 if (!(ss_e2
& DESC_S_MASK
) ||
748 (ss_e2
& DESC_CS_MASK
) ||
749 !(ss_e2
& DESC_W_MASK
))
750 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
751 if (!(ss_e2
& DESC_P_MASK
))
752 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
754 sp_mask
= get_sp_mask(ss_e2
);
755 ssp
= get_seg_base(ss_e1
, ss_e2
);
756 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
757 /* to same privilege */
758 if (env
->eflags
& VM_MASK
)
759 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
761 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
762 ssp
= env
->segs
[R_SS
].base
;
766 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
767 new_stack
= 0; /* avoid warning */
768 sp_mask
= 0; /* avoid warning */
769 ssp
= 0; /* avoid warning */
770 esp
= 0; /* avoid warning */
776 /* XXX: check that enough room is available */
777 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
778 if (env
->eflags
& VM_MASK
)
784 if (env
->eflags
& VM_MASK
) {
785 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
786 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
787 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
788 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
790 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
791 PUSHL(ssp
, esp
, sp_mask
, ESP
);
793 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
794 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
795 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
796 if (has_error_code
) {
797 PUSHL(ssp
, esp
, sp_mask
, error_code
);
801 if (env
->eflags
& VM_MASK
) {
802 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
803 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
804 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
805 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
807 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
808 PUSHW(ssp
, esp
, sp_mask
, ESP
);
810 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
811 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
812 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
813 if (has_error_code
) {
814 PUSHW(ssp
, esp
, sp_mask
, error_code
);
819 if (env
->eflags
& VM_MASK
) {
820 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
821 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
822 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
823 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
825 ss
= (ss
& ~3) | dpl
;
826 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
827 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
829 SET_ESP(esp
, sp_mask
);
831 selector
= (selector
& ~3) | dpl
;
832 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
833 get_seg_base(e1
, e2
),
834 get_seg_limit(e1
, e2
),
836 cpu_x86_set_cpl(env
, dpl
);
839 /* interrupt gate clear IF mask */
840 if ((type
& 1) == 0) {
841 env
->eflags
&= ~IF_MASK
;
843 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
848 #define PUSHQ(sp, val)\
851 stq_kernel(sp, (val));\
854 #define POPQ(sp, val)\
856 val = ldq_kernel(sp);\
860 static inline target_ulong
get_rsp_from_tss(int level
)
865 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
866 env
->tr
.base
, env
->tr
.limit
);
869 if (!(env
->tr
.flags
& DESC_P_MASK
))
870 cpu_abort(env
, "invalid tss");
871 index
= 8 * level
+ 4;
872 if ((index
+ 7) > env
->tr
.limit
)
873 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
874 return ldq_kernel(env
->tr
.base
+ index
);
877 /* 64 bit interrupt */
878 static void do_interrupt64(int intno
, int is_int
, int error_code
,
879 target_ulong next_eip
, int is_hw
)
883 int type
, dpl
, selector
, cpl
, ist
;
884 int has_error_code
, new_stack
;
885 uint32_t e1
, e2
, e3
, ss
;
886 target_ulong old_eip
, esp
, offset
;
889 if (!is_int
&& !is_hw
) {
908 if (intno
* 16 + 15 > dt
->limit
)
909 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
910 ptr
= dt
->base
+ intno
* 16;
911 e1
= ldl_kernel(ptr
);
912 e2
= ldl_kernel(ptr
+ 4);
913 e3
= ldl_kernel(ptr
+ 8);
914 /* check gate type */
915 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
917 case 14: /* 386 interrupt gate */
918 case 15: /* 386 trap gate */
921 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
924 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
925 cpl
= env
->hflags
& HF_CPL_MASK
;
926 /* check privilege if software int */
927 if (is_int
&& dpl
< cpl
)
928 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
929 /* check valid bit */
930 if (!(e2
& DESC_P_MASK
))
931 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
933 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
935 if ((selector
& 0xfffc) == 0)
936 raise_exception_err(EXCP0D_GPF
, 0);
938 if (load_segment(&e1
, &e2
, selector
) != 0)
939 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
940 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
941 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
942 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
944 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
945 if (!(e2
& DESC_P_MASK
))
946 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
947 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
948 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
949 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
950 /* to inner privilege */
952 esp
= get_rsp_from_tss(ist
+ 3);
954 esp
= get_rsp_from_tss(dpl
);
955 esp
&= ~0xfLL
; /* align stack */
958 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
959 /* to same privilege */
960 if (env
->eflags
& VM_MASK
)
961 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
964 esp
= get_rsp_from_tss(ist
+ 3);
967 esp
&= ~0xfLL
; /* align stack */
970 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
971 new_stack
= 0; /* avoid warning */
972 esp
= 0; /* avoid warning */
975 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
977 PUSHQ(esp
, compute_eflags());
978 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
980 if (has_error_code
) {
981 PUSHQ(esp
, error_code
);
986 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
990 selector
= (selector
& ~3) | dpl
;
991 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
992 get_seg_base(e1
, e2
),
993 get_seg_limit(e1
, e2
),
995 cpu_x86_set_cpl(env
, dpl
);
998 /* interrupt gate clear IF mask */
999 if ((type
& 1) == 0) {
1000 env
->eflags
&= ~IF_MASK
;
1002 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1006 #ifdef TARGET_X86_64
1007 #if defined(CONFIG_USER_ONLY)
1008 void helper_syscall(int next_eip_addend
)
1010 env
->exception_index
= EXCP_SYSCALL
;
1011 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1015 void helper_syscall(int next_eip_addend
)
1019 if (!(env
->efer
& MSR_EFER_SCE
)) {
1020 raise_exception_err(EXCP06_ILLOP
, 0);
1022 selector
= (env
->star
>> 32) & 0xffff;
1023 if (env
->hflags
& HF_LMA_MASK
) {
1026 ECX
= env
->eip
+ next_eip_addend
;
1027 env
->regs
[11] = compute_eflags();
1029 code64
= env
->hflags
& HF_CS64_MASK
;
1031 cpu_x86_set_cpl(env
, 0);
1032 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1034 DESC_G_MASK
| DESC_P_MASK
|
1036 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1037 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1039 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1041 DESC_W_MASK
| DESC_A_MASK
);
1042 env
->eflags
&= ~env
->fmask
;
1043 load_eflags(env
->eflags
, 0);
1045 env
->eip
= env
->lstar
;
1047 env
->eip
= env
->cstar
;
1049 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1051 cpu_x86_set_cpl(env
, 0);
1052 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1054 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1056 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1057 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1059 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1061 DESC_W_MASK
| DESC_A_MASK
);
1062 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1063 env
->eip
= (uint32_t)env
->star
;
1069 #ifdef TARGET_X86_64
1070 void helper_sysret(int dflag
)
1074 if (!(env
->efer
& MSR_EFER_SCE
)) {
1075 raise_exception_err(EXCP06_ILLOP
, 0);
1077 cpl
= env
->hflags
& HF_CPL_MASK
;
1078 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1079 raise_exception_err(EXCP0D_GPF
, 0);
1081 selector
= (env
->star
>> 48) & 0xffff;
1082 if (env
->hflags
& HF_LMA_MASK
) {
1084 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1086 DESC_G_MASK
| DESC_P_MASK
|
1087 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1088 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1092 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1094 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1095 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1096 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1097 env
->eip
= (uint32_t)ECX
;
1099 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1101 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1102 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1103 DESC_W_MASK
| DESC_A_MASK
);
1104 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1105 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1106 cpu_x86_set_cpl(env
, 3);
1108 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1110 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1111 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1112 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1113 env
->eip
= (uint32_t)ECX
;
1114 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1116 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1117 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1118 DESC_W_MASK
| DESC_A_MASK
);
1119 env
->eflags
|= IF_MASK
;
1120 cpu_x86_set_cpl(env
, 3);
1123 if (kqemu_is_ok(env
)) {
1124 if (env
->hflags
& HF_LMA_MASK
)
1125 CC_OP
= CC_OP_EFLAGS
;
1126 env
->exception_index
= -1;
1133 /* real mode interrupt */
1134 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1135 unsigned int next_eip
)
1138 target_ulong ptr
, ssp
;
1140 uint32_t offset
, esp
;
1141 uint32_t old_cs
, old_eip
;
1143 /* real mode (simpler !) */
1145 if (intno
* 4 + 3 > dt
->limit
)
1146 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1147 ptr
= dt
->base
+ intno
* 4;
1148 offset
= lduw_kernel(ptr
);
1149 selector
= lduw_kernel(ptr
+ 2);
1151 ssp
= env
->segs
[R_SS
].base
;
1156 old_cs
= env
->segs
[R_CS
].selector
;
1157 /* XXX: use SS segment size ? */
1158 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1159 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1160 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1162 /* update processor state */
1163 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1165 env
->segs
[R_CS
].selector
= selector
;
1166 env
->segs
[R_CS
].base
= (selector
<< 4);
1167 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1170 /* fake user mode interrupt */
1171 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1172 target_ulong next_eip
)
1176 int dpl
, cpl
, shift
;
1180 if (env
->hflags
& HF_LMA_MASK
) {
1185 ptr
= dt
->base
+ (intno
<< shift
);
1186 e2
= ldl_kernel(ptr
+ 4);
1188 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1189 cpl
= env
->hflags
& HF_CPL_MASK
;
1190 /* check privilege if software int */
1191 if (is_int
&& dpl
< cpl
)
1192 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1194 /* Since we emulate only user space, we cannot do more than
1195 exiting the emulation with the suitable exception and error
1202 * Begin execution of an interruption. is_int is TRUE if coming from
1203 * the int instruction. next_eip is the EIP value AFTER the interrupt
1204 * instruction. It is only relevant if is_int is TRUE.
1206 void do_interrupt(int intno
, int is_int
, int error_code
,
1207 target_ulong next_eip
, int is_hw
)
1209 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1210 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1212 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1213 count
, intno
, error_code
, is_int
,
1214 env
->hflags
& HF_CPL_MASK
,
1215 env
->segs
[R_CS
].selector
, EIP
,
1216 (int)env
->segs
[R_CS
].base
+ EIP
,
1217 env
->segs
[R_SS
].selector
, ESP
);
1218 if (intno
== 0x0e) {
1219 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1221 qemu_log(" EAX=" TARGET_FMT_lx
, EAX
);
1224 log_cpu_state(env
, X86_DUMP_CCOP
);
1230 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1231 for(i
= 0; i
< 16; i
++) {
1232 qemu_log(" %02x", ldub(ptr
+ i
));
1240 if (env
->cr
[0] & CR0_PE_MASK
) {
1241 #ifdef TARGET_X86_64
1242 if (env
->hflags
& HF_LMA_MASK
) {
1243 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1247 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1250 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1254 /* This should come from sysemu.h - if we could include it here... */
1255 void qemu_system_reset_request(void);
1258 * Check nested exceptions and change to double or triple fault if
1259 * needed. It should only be called, if this is not an interrupt.
1260 * Returns the new exception number.
1262 static int check_exception(int intno
, int *error_code
)
1264 int first_contributory
= env
->old_exception
== 0 ||
1265 (env
->old_exception
>= 10 &&
1266 env
->old_exception
<= 13);
1267 int second_contributory
= intno
== 0 ||
1268 (intno
>= 10 && intno
<= 13);
1270 qemu_log_mask(CPU_LOG_INT
, "check_exception old: 0x%x new 0x%x\n",
1271 env
->old_exception
, intno
);
1273 #if !defined(CONFIG_USER_ONLY)
1274 if (env
->old_exception
== EXCP08_DBLE
) {
1275 if (env
->hflags
& HF_SVMI_MASK
)
1276 helper_vmexit(SVM_EXIT_SHUTDOWN
, 0); /* does not return */
1278 qemu_log_mask(CPU_LOG_RESET
, "Triple fault\n");
1280 qemu_system_reset_request();
1285 if ((first_contributory
&& second_contributory
)
1286 || (env
->old_exception
== EXCP0E_PAGE
&&
1287 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1288 intno
= EXCP08_DBLE
;
1292 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1293 (intno
== EXCP08_DBLE
))
1294 env
->old_exception
= intno
;
1300 * Signal an interruption. It is executed in the main CPU loop.
1301 * is_int is TRUE if coming from the int instruction. next_eip is the
1302 * EIP value AFTER the interrupt instruction. It is only relevant if
1305 static void QEMU_NORETURN
raise_interrupt(int intno
, int is_int
, int error_code
,
1306 int next_eip_addend
)
1309 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1310 intno
= check_exception(intno
, &error_code
);
1312 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
1315 env
->exception_index
= intno
;
1316 env
->error_code
= error_code
;
1317 env
->exception_is_int
= is_int
;
1318 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1322 /* shortcuts to generate exceptions */
1324 void raise_exception_err(int exception_index
, int error_code
)
1326 raise_interrupt(exception_index
, 0, error_code
, 0);
1329 void raise_exception(int exception_index
)
1331 raise_interrupt(exception_index
, 0, 0, 0);
1336 #if defined(CONFIG_USER_ONLY)
1338 void do_smm_enter(void)
1342 void helper_rsm(void)
1348 #ifdef TARGET_X86_64
1349 #define SMM_REVISION_ID 0x00020064
1351 #define SMM_REVISION_ID 0x00020000
1354 void do_smm_enter(void)
1356 target_ulong sm_state
;
1360 qemu_log_mask(CPU_LOG_INT
, "SMM: enter\n");
1361 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1363 env
->hflags
|= HF_SMM_MASK
;
1364 cpu_smm_update(env
);
1366 sm_state
= env
->smbase
+ 0x8000;
1368 #ifdef TARGET_X86_64
1369 for(i
= 0; i
< 6; i
++) {
1371 offset
= 0x7e00 + i
* 16;
1372 stw_phys(sm_state
+ offset
, dt
->selector
);
1373 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1374 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1375 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1378 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1379 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1381 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1382 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1383 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1384 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1386 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1387 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1389 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1390 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1391 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1392 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1394 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1396 stq_phys(sm_state
+ 0x7ff8, EAX
);
1397 stq_phys(sm_state
+ 0x7ff0, ECX
);
1398 stq_phys(sm_state
+ 0x7fe8, EDX
);
1399 stq_phys(sm_state
+ 0x7fe0, EBX
);
1400 stq_phys(sm_state
+ 0x7fd8, ESP
);
1401 stq_phys(sm_state
+ 0x7fd0, EBP
);
1402 stq_phys(sm_state
+ 0x7fc8, ESI
);
1403 stq_phys(sm_state
+ 0x7fc0, EDI
);
1404 for(i
= 8; i
< 16; i
++)
1405 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1406 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1407 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1408 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1409 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1411 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1412 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1413 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1415 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1416 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1418 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1419 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1420 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1421 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1422 stl_phys(sm_state
+ 0x7fec, EDI
);
1423 stl_phys(sm_state
+ 0x7fe8, ESI
);
1424 stl_phys(sm_state
+ 0x7fe4, EBP
);
1425 stl_phys(sm_state
+ 0x7fe0, ESP
);
1426 stl_phys(sm_state
+ 0x7fdc, EBX
);
1427 stl_phys(sm_state
+ 0x7fd8, EDX
);
1428 stl_phys(sm_state
+ 0x7fd4, ECX
);
1429 stl_phys(sm_state
+ 0x7fd0, EAX
);
1430 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1431 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1433 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1434 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1435 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1436 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1438 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1439 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1440 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1441 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1443 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1444 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1446 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1447 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1449 for(i
= 0; i
< 6; i
++) {
1452 offset
= 0x7f84 + i
* 12;
1454 offset
= 0x7f2c + (i
- 3) * 12;
1455 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1456 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1457 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1458 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1460 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1462 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1463 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1465 /* init SMM cpu state */
1467 #ifdef TARGET_X86_64
1468 cpu_load_efer(env
, 0);
1470 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1471 env
->eip
= 0x00008000;
1472 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1474 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1475 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1476 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1477 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1478 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1480 cpu_x86_update_cr0(env
,
1481 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1482 cpu_x86_update_cr4(env
, 0);
1483 env
->dr
[7] = 0x00000400;
1484 CC_OP
= CC_OP_EFLAGS
;
1487 void helper_rsm(void)
1489 target_ulong sm_state
;
1493 sm_state
= env
->smbase
+ 0x8000;
1494 #ifdef TARGET_X86_64
1495 cpu_load_efer(env
, ldq_phys(sm_state
+ 0x7ed0));
1497 for(i
= 0; i
< 6; i
++) {
1498 offset
= 0x7e00 + i
* 16;
1499 cpu_x86_load_seg_cache(env
, i
,
1500 lduw_phys(sm_state
+ offset
),
1501 ldq_phys(sm_state
+ offset
+ 8),
1502 ldl_phys(sm_state
+ offset
+ 4),
1503 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1506 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1507 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1509 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1510 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1511 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1512 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1514 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1515 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1517 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1518 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1519 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1520 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1522 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1523 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1524 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1525 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1526 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1527 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1528 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1529 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1530 for(i
= 8; i
< 16; i
++)
1531 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1532 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1533 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1534 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1535 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1536 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1538 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1539 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1540 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1542 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1543 if (val
& 0x20000) {
1544 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1547 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1548 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1549 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1550 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1551 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1552 EDI
= ldl_phys(sm_state
+ 0x7fec);
1553 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1554 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1555 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1556 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1557 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1558 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1559 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1560 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1561 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1563 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1564 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1565 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1566 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1568 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1569 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1570 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1571 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1573 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1574 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1576 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1577 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1579 for(i
= 0; i
< 6; i
++) {
1581 offset
= 0x7f84 + i
* 12;
1583 offset
= 0x7f2c + (i
- 3) * 12;
1584 cpu_x86_load_seg_cache(env
, i
,
1585 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1586 ldl_phys(sm_state
+ offset
+ 8),
1587 ldl_phys(sm_state
+ offset
+ 4),
1588 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1590 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1592 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1593 if (val
& 0x20000) {
1594 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1597 CC_OP
= CC_OP_EFLAGS
;
1598 env
->hflags
&= ~HF_SMM_MASK
;
1599 cpu_smm_update(env
);
1601 qemu_log_mask(CPU_LOG_INT
, "SMM: after RSM\n");
1602 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1605 #endif /* !CONFIG_USER_ONLY */
1608 /* division, flags are undefined */
1610 void helper_divb_AL(target_ulong t0
)
1612 unsigned int num
, den
, q
, r
;
1614 num
= (EAX
& 0xffff);
1617 raise_exception(EXCP00_DIVZ
);
1621 raise_exception(EXCP00_DIVZ
);
1623 r
= (num
% den
) & 0xff;
1624 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1627 void helper_idivb_AL(target_ulong t0
)
1634 raise_exception(EXCP00_DIVZ
);
1638 raise_exception(EXCP00_DIVZ
);
1640 r
= (num
% den
) & 0xff;
1641 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1644 void helper_divw_AX(target_ulong t0
)
1646 unsigned int num
, den
, q
, r
;
1648 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1649 den
= (t0
& 0xffff);
1651 raise_exception(EXCP00_DIVZ
);
1655 raise_exception(EXCP00_DIVZ
);
1657 r
= (num
% den
) & 0xffff;
1658 EAX
= (EAX
& ~0xffff) | q
;
1659 EDX
= (EDX
& ~0xffff) | r
;
1662 void helper_idivw_AX(target_ulong t0
)
1666 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1669 raise_exception(EXCP00_DIVZ
);
1672 if (q
!= (int16_t)q
)
1673 raise_exception(EXCP00_DIVZ
);
1675 r
= (num
% den
) & 0xffff;
1676 EAX
= (EAX
& ~0xffff) | q
;
1677 EDX
= (EDX
& ~0xffff) | r
;
1680 void helper_divl_EAX(target_ulong t0
)
1682 unsigned int den
, r
;
1685 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1688 raise_exception(EXCP00_DIVZ
);
1693 raise_exception(EXCP00_DIVZ
);
1698 void helper_idivl_EAX(target_ulong t0
)
1703 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1706 raise_exception(EXCP00_DIVZ
);
1710 if (q
!= (int32_t)q
)
1711 raise_exception(EXCP00_DIVZ
);
1718 /* XXX: exception */
1719 void helper_aam(int base
)
1725 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1729 void helper_aad(int base
)
1733 ah
= (EAX
>> 8) & 0xff;
1734 al
= ((ah
* base
) + al
) & 0xff;
1735 EAX
= (EAX
& ~0xffff) | al
;
1739 void helper_aaa(void)
1745 eflags
= helper_cc_compute_all(CC_OP
);
1748 ah
= (EAX
>> 8) & 0xff;
1750 icarry
= (al
> 0xf9);
1751 if (((al
& 0x0f) > 9 ) || af
) {
1752 al
= (al
+ 6) & 0x0f;
1753 ah
= (ah
+ 1 + icarry
) & 0xff;
1754 eflags
|= CC_C
| CC_A
;
1756 eflags
&= ~(CC_C
| CC_A
);
1759 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1763 void helper_aas(void)
1769 eflags
= helper_cc_compute_all(CC_OP
);
1772 ah
= (EAX
>> 8) & 0xff;
1775 if (((al
& 0x0f) > 9 ) || af
) {
1776 al
= (al
- 6) & 0x0f;
1777 ah
= (ah
- 1 - icarry
) & 0xff;
1778 eflags
|= CC_C
| CC_A
;
1780 eflags
&= ~(CC_C
| CC_A
);
1783 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1787 void helper_daa(void)
1792 eflags
= helper_cc_compute_all(CC_OP
);
1798 if (((al
& 0x0f) > 9 ) || af
) {
1799 al
= (al
+ 6) & 0xff;
1802 if ((al
> 0x9f) || cf
) {
1803 al
= (al
+ 0x60) & 0xff;
1806 EAX
= (EAX
& ~0xff) | al
;
1807 /* well, speed is not an issue here, so we compute the flags by hand */
1808 eflags
|= (al
== 0) << 6; /* zf */
1809 eflags
|= parity_table
[al
]; /* pf */
1810 eflags
|= (al
& 0x80); /* sf */
1814 void helper_das(void)
1816 int al
, al1
, af
, cf
;
1819 eflags
= helper_cc_compute_all(CC_OP
);
1826 if (((al
& 0x0f) > 9 ) || af
) {
1830 al
= (al
- 6) & 0xff;
1832 if ((al1
> 0x99) || cf
) {
1833 al
= (al
- 0x60) & 0xff;
1836 EAX
= (EAX
& ~0xff) | al
;
1837 /* well, speed is not an issue here, so we compute the flags by hand */
1838 eflags
|= (al
== 0) << 6; /* zf */
1839 eflags
|= parity_table
[al
]; /* pf */
1840 eflags
|= (al
& 0x80); /* sf */
1844 void helper_into(int next_eip_addend
)
1847 eflags
= helper_cc_compute_all(CC_OP
);
1848 if (eflags
& CC_O
) {
1849 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1853 void helper_cmpxchg8b(target_ulong a0
)
1858 eflags
= helper_cc_compute_all(CC_OP
);
1860 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1861 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1864 /* always do the store */
1866 EDX
= (uint32_t)(d
>> 32);
1873 #ifdef TARGET_X86_64
1874 void helper_cmpxchg16b(target_ulong a0
)
1879 if ((a0
& 0xf) != 0)
1880 raise_exception(EXCP0D_GPF
);
1881 eflags
= helper_cc_compute_all(CC_OP
);
1884 if (d0
== EAX
&& d1
== EDX
) {
1889 /* always do the store */
1900 void helper_single_step(void)
1902 #ifndef CONFIG_USER_ONLY
1903 check_hw_breakpoints(env
, 1);
1904 env
->dr
[6] |= DR6_BS
;
1906 raise_exception(EXCP01_DB
);
1909 void helper_cpuid(void)
1911 uint32_t eax
, ebx
, ecx
, edx
;
1913 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1915 cpu_x86_cpuid(env
, (uint32_t)EAX
, (uint32_t)ECX
, &eax
, &ebx
, &ecx
, &edx
);
1922 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1925 uint32_t esp_mask
, esp
, ebp
;
1927 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1928 ssp
= env
->segs
[R_SS
].base
;
1937 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1940 stl(ssp
+ (esp
& esp_mask
), t1
);
1947 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1950 stw(ssp
+ (esp
& esp_mask
), t1
);
1954 #ifdef TARGET_X86_64
1955 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
1957 target_ulong esp
, ebp
;
1977 stw(esp
, lduw(ebp
));
1985 void helper_lldt(int selector
)
1989 int index
, entry_limit
;
1993 if ((selector
& 0xfffc) == 0) {
1994 /* XXX: NULL selector case: invalid LDT */
1999 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2001 index
= selector
& ~7;
2002 #ifdef TARGET_X86_64
2003 if (env
->hflags
& HF_LMA_MASK
)
2008 if ((index
+ entry_limit
) > dt
->limit
)
2009 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2010 ptr
= dt
->base
+ index
;
2011 e1
= ldl_kernel(ptr
);
2012 e2
= ldl_kernel(ptr
+ 4);
2013 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2014 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2015 if (!(e2
& DESC_P_MASK
))
2016 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2017 #ifdef TARGET_X86_64
2018 if (env
->hflags
& HF_LMA_MASK
) {
2020 e3
= ldl_kernel(ptr
+ 8);
2021 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2022 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2026 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2029 env
->ldt
.selector
= selector
;
2032 void helper_ltr(int selector
)
2036 int index
, type
, entry_limit
;
2040 if ((selector
& 0xfffc) == 0) {
2041 /* NULL selector case: invalid TR */
2047 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2049 index
= selector
& ~7;
2050 #ifdef TARGET_X86_64
2051 if (env
->hflags
& HF_LMA_MASK
)
2056 if ((index
+ entry_limit
) > dt
->limit
)
2057 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2058 ptr
= dt
->base
+ index
;
2059 e1
= ldl_kernel(ptr
);
2060 e2
= ldl_kernel(ptr
+ 4);
2061 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2062 if ((e2
& DESC_S_MASK
) ||
2063 (type
!= 1 && type
!= 9))
2064 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2065 if (!(e2
& DESC_P_MASK
))
2066 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2067 #ifdef TARGET_X86_64
2068 if (env
->hflags
& HF_LMA_MASK
) {
2070 e3
= ldl_kernel(ptr
+ 8);
2071 e4
= ldl_kernel(ptr
+ 12);
2072 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2073 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2074 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2075 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2079 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2081 e2
|= DESC_TSS_BUSY_MASK
;
2082 stl_kernel(ptr
+ 4, e2
);
2084 env
->tr
.selector
= selector
;
2087 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2088 void helper_load_seg(int seg_reg
, int selector
)
2097 cpl
= env
->hflags
& HF_CPL_MASK
;
2098 if ((selector
& 0xfffc) == 0) {
2099 /* null selector case */
2101 #ifdef TARGET_X86_64
2102 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2105 raise_exception_err(EXCP0D_GPF
, 0);
2106 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2113 index
= selector
& ~7;
2114 if ((index
+ 7) > dt
->limit
)
2115 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2116 ptr
= dt
->base
+ index
;
2117 e1
= ldl_kernel(ptr
);
2118 e2
= ldl_kernel(ptr
+ 4);
2120 if (!(e2
& DESC_S_MASK
))
2121 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2123 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2124 if (seg_reg
== R_SS
) {
2125 /* must be writable segment */
2126 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2127 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2128 if (rpl
!= cpl
|| dpl
!= cpl
)
2129 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2131 /* must be readable segment */
2132 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2133 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2135 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2136 /* if not conforming code, test rights */
2137 if (dpl
< cpl
|| dpl
< rpl
)
2138 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2142 if (!(e2
& DESC_P_MASK
)) {
2143 if (seg_reg
== R_SS
)
2144 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2146 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2149 /* set the access bit if not already set */
2150 if (!(e2
& DESC_A_MASK
)) {
2152 stl_kernel(ptr
+ 4, e2
);
2155 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2156 get_seg_base(e1
, e2
),
2157 get_seg_limit(e1
, e2
),
2160 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2161 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2166 /* protected mode jump */
2167 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2168 int next_eip_addend
)
2171 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2172 target_ulong next_eip
;
2174 if ((new_cs
& 0xfffc) == 0)
2175 raise_exception_err(EXCP0D_GPF
, 0);
2176 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2177 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2178 cpl
= env
->hflags
& HF_CPL_MASK
;
2179 if (e2
& DESC_S_MASK
) {
2180 if (!(e2
& DESC_CS_MASK
))
2181 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2182 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2183 if (e2
& DESC_C_MASK
) {
2184 /* conforming code segment */
2186 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2188 /* non conforming code segment */
2191 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2193 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2195 if (!(e2
& DESC_P_MASK
))
2196 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2197 limit
= get_seg_limit(e1
, e2
);
2198 if (new_eip
> limit
&&
2199 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2200 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2201 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2202 get_seg_base(e1
, e2
), limit
, e2
);
2205 /* jump to call or task gate */
2206 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2208 cpl
= env
->hflags
& HF_CPL_MASK
;
2209 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2211 case 1: /* 286 TSS */
2212 case 9: /* 386 TSS */
2213 case 5: /* task gate */
2214 if (dpl
< cpl
|| dpl
< rpl
)
2215 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2216 next_eip
= env
->eip
+ next_eip_addend
;
2217 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2218 CC_OP
= CC_OP_EFLAGS
;
2220 case 4: /* 286 call gate */
2221 case 12: /* 386 call gate */
2222 if ((dpl
< cpl
) || (dpl
< rpl
))
2223 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2224 if (!(e2
& DESC_P_MASK
))
2225 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2227 new_eip
= (e1
& 0xffff);
2229 new_eip
|= (e2
& 0xffff0000);
2230 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2231 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2232 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2233 /* must be code segment */
2234 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2235 (DESC_S_MASK
| DESC_CS_MASK
)))
2236 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2237 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2238 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2239 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2240 if (!(e2
& DESC_P_MASK
))
2241 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2242 limit
= get_seg_limit(e1
, e2
);
2243 if (new_eip
> limit
)
2244 raise_exception_err(EXCP0D_GPF
, 0);
2245 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2246 get_seg_base(e1
, e2
), limit
, e2
);
2250 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2256 /* real mode call */
2257 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2258 int shift
, int next_eip
)
2261 uint32_t esp
, esp_mask
;
2266 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2267 ssp
= env
->segs
[R_SS
].base
;
2269 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2270 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2272 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2273 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2276 SET_ESP(esp
, esp_mask
);
2278 env
->segs
[R_CS
].selector
= new_cs
;
2279 env
->segs
[R_CS
].base
= (new_cs
<< 4);
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }

    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
}
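/* NOTE: for a call gate to a more privileged, non conforming code
   segment, SS:ESP are fetched from the TSS for the target privilege
   level and param_count words/dwords are copied from the old stack to
   the new one before the caller's CS:EIP are pushed. */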
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
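/* NOTE: in vm86 mode IOPL is not writable by iret, so the eflags_mask
   above excludes IOPL_MASK when VM_MASK is set; a 16 bit iret further
   restricts the mask to the low 16 bits of EFLAGS. */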
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
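/* NOTE: validate_seg implements the data segment invalidation done on
   a protected mode return to an outer privilege level: any DS/ES/FS/GS
   descriptor whose DPL is below the new CPL is nulled so the less
   privileged code cannot keep using it. */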
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
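/* NOTE: 'return_to_vm86' is only reachable from a 32 bit iret whose
   popped EFLAGS image has VM set: the six extra dwords popped there
   (ESP, SS, ES, DS, FS, GS) are the ones the CPU pushed on the level 0
   stack when the vm86 task was interrupted. */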
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
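/* NOTE: SYSENTER loads CS/SS with fixed flat 4GB descriptors derived
   from MSR_IA32_SYSENTER_CS (CS = sysenter_cs, SS = sysenter_cs + 8)
   and saves no return context; the calling code has to stash its own
   return ESP/EIP before executing the instruction. */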
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
}
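/* NOTE: SYSEXIT mirrors SYSENTER: the CPL 3 selectors are derived from
   sysenter_cs (+16/+24 in legacy mode, +32/+40 in 64 bit mode) and the
   return ESP/EIP are taken from ECX/EDX. */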
2865 #if defined(CONFIG_USER_ONLY)
2866 target_ulong
helper_read_crN(int reg
)
2871 void helper_write_crN(int reg
, target_ulong t0
)
2875 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2879 target_ulong
helper_read_crN(int reg
)
2883 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2889 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2890 val
= cpu_get_apic_tpr(env
);
2899 void helper_write_crN(int reg
, target_ulong t0
)
2901 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2904 cpu_x86_update_cr0(env
, t0
);
2907 cpu_x86_update_cr3(env
, t0
);
2910 cpu_x86_update_cr4(env
, t0
);
2913 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2914 cpu_set_apic_tpr(env
, t0
);
2916 env
->v_tpr
= t0
& 0x0f;
2924 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2929 hw_breakpoint_remove(env
, reg
);
2931 hw_breakpoint_insert(env
, reg
);
2932 } else if (reg
== 7) {
2933 for (i
= 0; i
< 4; i
++)
2934 hw_breakpoint_remove(env
, i
);
2936 for (i
= 0; i
< 4; i
++)
2937 hw_breakpoint_insert(env
, i
);
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
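/* NOTE: RDTSC becomes a privileged instruction once CR4.TSD is set:
   non CPL0 code then gets a #GP instead of the counter value. The TSC
   value returned to the guest includes the SVM tsc_offset, so nested
   guests observe their virtualized counter. */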
2988 #if defined(CONFIG_USER_ONLY)
2989 void helper_wrmsr(void)
2993 void helper_rdmsr(void)
2997 void helper_wrmsr(void)
3001 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3003 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3005 switch((uint32_t)ECX
) {
3006 case MSR_IA32_SYSENTER_CS
:
3007 env
->sysenter_cs
= val
& 0xffff;
3009 case MSR_IA32_SYSENTER_ESP
:
3010 env
->sysenter_esp
= val
;
3012 case MSR_IA32_SYSENTER_EIP
:
3013 env
->sysenter_eip
= val
;
3015 case MSR_IA32_APICBASE
:
3016 cpu_set_apic_base(env
, val
);
3020 uint64_t update_mask
;
3022 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3023 update_mask
|= MSR_EFER_SCE
;
3024 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3025 update_mask
|= MSR_EFER_LME
;
3026 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3027 update_mask
|= MSR_EFER_FFXSR
;
3028 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3029 update_mask
|= MSR_EFER_NXE
;
3030 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3031 update_mask
|= MSR_EFER_SVME
;
3032 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3033 update_mask
|= MSR_EFER_FFXSR
;
3034 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3035 (val
& update_mask
));
3044 case MSR_VM_HSAVE_PA
:
3045 env
->vm_hsave
= val
;
3047 #ifdef TARGET_X86_64
3058 env
->segs
[R_FS
].base
= val
;
3061 env
->segs
[R_GS
].base
= val
;
3063 case MSR_KERNELGSBASE
:
3064 env
->kernelgsbase
= val
;
3067 case MSR_MTRRphysBase(0):
3068 case MSR_MTRRphysBase(1):
3069 case MSR_MTRRphysBase(2):
3070 case MSR_MTRRphysBase(3):
3071 case MSR_MTRRphysBase(4):
3072 case MSR_MTRRphysBase(5):
3073 case MSR_MTRRphysBase(6):
3074 case MSR_MTRRphysBase(7):
3075 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3077 case MSR_MTRRphysMask(0):
3078 case MSR_MTRRphysMask(1):
3079 case MSR_MTRRphysMask(2):
3080 case MSR_MTRRphysMask(3):
3081 case MSR_MTRRphysMask(4):
3082 case MSR_MTRRphysMask(5):
3083 case MSR_MTRRphysMask(6):
3084 case MSR_MTRRphysMask(7):
3085 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3087 case MSR_MTRRfix64K_00000
:
3088 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3090 case MSR_MTRRfix16K_80000
:
3091 case MSR_MTRRfix16K_A0000
:
3092 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3094 case MSR_MTRRfix4K_C0000
:
3095 case MSR_MTRRfix4K_C8000
:
3096 case MSR_MTRRfix4K_D0000
:
3097 case MSR_MTRRfix4K_D8000
:
3098 case MSR_MTRRfix4K_E0000
:
3099 case MSR_MTRRfix4K_E8000
:
3100 case MSR_MTRRfix4K_F0000
:
3101 case MSR_MTRRfix4K_F8000
:
3102 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3104 case MSR_MTRRdefType
:
3105 env
->mtrr_deftype
= val
;
3108 /* XXX: exception ? */
3113 void helper_rdmsr(void)
3117 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3119 switch((uint32_t)ECX
) {
3120 case MSR_IA32_SYSENTER_CS
:
3121 val
= env
->sysenter_cs
;
3123 case MSR_IA32_SYSENTER_ESP
:
3124 val
= env
->sysenter_esp
;
3126 case MSR_IA32_SYSENTER_EIP
:
3127 val
= env
->sysenter_eip
;
3129 case MSR_IA32_APICBASE
:
3130 val
= cpu_get_apic_base(env
);
3141 case MSR_VM_HSAVE_PA
:
3142 val
= env
->vm_hsave
;
3144 case MSR_IA32_PERF_STATUS
:
3145 /* tsc_increment_by_tick */
3147 /* CPU multiplier */
3148 val
|= (((uint64_t)4ULL) << 40);
3150 #ifdef TARGET_X86_64
3161 val
= env
->segs
[R_FS
].base
;
3164 val
= env
->segs
[R_GS
].base
;
3166 case MSR_KERNELGSBASE
:
3167 val
= env
->kernelgsbase
;
3171 case MSR_QPI_COMMBASE
:
3172 if (env
->kqemu_enabled
) {
3173 val
= kqemu_comm_base
;
3179 case MSR_MTRRphysBase(0):
3180 case MSR_MTRRphysBase(1):
3181 case MSR_MTRRphysBase(2):
3182 case MSR_MTRRphysBase(3):
3183 case MSR_MTRRphysBase(4):
3184 case MSR_MTRRphysBase(5):
3185 case MSR_MTRRphysBase(6):
3186 case MSR_MTRRphysBase(7):
3187 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3189 case MSR_MTRRphysMask(0):
3190 case MSR_MTRRphysMask(1):
3191 case MSR_MTRRphysMask(2):
3192 case MSR_MTRRphysMask(3):
3193 case MSR_MTRRphysMask(4):
3194 case MSR_MTRRphysMask(5):
3195 case MSR_MTRRphysMask(6):
3196 case MSR_MTRRphysMask(7):
3197 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3199 case MSR_MTRRfix64K_00000
:
3200 val
= env
->mtrr_fixed
[0];
3202 case MSR_MTRRfix16K_80000
:
3203 case MSR_MTRRfix16K_A0000
:
3204 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3206 case MSR_MTRRfix4K_C0000
:
3207 case MSR_MTRRfix4K_C8000
:
3208 case MSR_MTRRfix4K_D0000
:
3209 case MSR_MTRRfix4K_D8000
:
3210 case MSR_MTRRfix4K_E0000
:
3211 case MSR_MTRRfix4K_E8000
:
3212 case MSR_MTRRfix4K_F0000
:
3213 case MSR_MTRRfix4K_F8000
:
3214 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3216 case MSR_MTRRdefType
:
3217 val
= env
->mtrr_deftype
;
3220 if (env
->cpuid_features
& CPUID_MTRR
)
3221 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3223 /* XXX: exception ? */
3227 /* XXX: exception ? */
3231 EAX
= (uint32_t)(val
);
3232 EDX
= (uint32_t)(val
>> 32);
3236 target_ulong
helper_lsl(target_ulong selector1
)
3239 uint32_t e1
, e2
, eflags
, selector
;
3240 int rpl
, dpl
, cpl
, type
;
3242 selector
= selector1
& 0xffff;
3243 eflags
= helper_cc_compute_all(CC_OP
);
3244 if (load_segment(&e1
, &e2
, selector
) != 0)
3247 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3248 cpl
= env
->hflags
& HF_CPL_MASK
;
3249 if (e2
& DESC_S_MASK
) {
3250 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3253 if (dpl
< cpl
|| dpl
< rpl
)
3257 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3268 if (dpl
< cpl
|| dpl
< rpl
) {
3270 CC_SRC
= eflags
& ~CC_Z
;
3274 limit
= get_seg_limit(e1
, e2
);
3275 CC_SRC
= eflags
| CC_Z
;
3279 target_ulong
helper_lar(target_ulong selector1
)
3281 uint32_t e1
, e2
, eflags
, selector
;
3282 int rpl
, dpl
, cpl
, type
;
3284 selector
= selector1
& 0xffff;
3285 eflags
= helper_cc_compute_all(CC_OP
);
3286 if ((selector
& 0xfffc) == 0)
3288 if (load_segment(&e1
, &e2
, selector
) != 0)
3291 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3292 cpl
= env
->hflags
& HF_CPL_MASK
;
3293 if (e2
& DESC_S_MASK
) {
3294 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3297 if (dpl
< cpl
|| dpl
< rpl
)
3301 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3315 if (dpl
< cpl
|| dpl
< rpl
) {
3317 CC_SRC
= eflags
& ~CC_Z
;
3321 CC_SRC
= eflags
| CC_Z
;
3322 return e2
& 0x00f0ff00;
3325 void helper_verr(target_ulong selector1
)
3327 uint32_t e1
, e2
, eflags
, selector
;
3330 selector
= selector1
& 0xffff;
3331 eflags
= helper_cc_compute_all(CC_OP
);
3332 if ((selector
& 0xfffc) == 0)
3334 if (load_segment(&e1
, &e2
, selector
) != 0)
3336 if (!(e2
& DESC_S_MASK
))
3339 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3340 cpl
= env
->hflags
& HF_CPL_MASK
;
3341 if (e2
& DESC_CS_MASK
) {
3342 if (!(e2
& DESC_R_MASK
))
3344 if (!(e2
& DESC_C_MASK
)) {
3345 if (dpl
< cpl
|| dpl
< rpl
)
3349 if (dpl
< cpl
|| dpl
< rpl
) {
3351 CC_SRC
= eflags
& ~CC_Z
;
3355 CC_SRC
= eflags
| CC_Z
;
3358 void helper_verw(target_ulong selector1
)
3360 uint32_t e1
, e2
, eflags
, selector
;
3363 selector
= selector1
& 0xffff;
3364 eflags
= helper_cc_compute_all(CC_OP
);
3365 if ((selector
& 0xfffc) == 0)
3367 if (load_segment(&e1
, &e2
, selector
) != 0)
3369 if (!(e2
& DESC_S_MASK
))
3372 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3373 cpl
= env
->hflags
& HF_CPL_MASK
;
3374 if (e2
& DESC_CS_MASK
) {
3377 if (dpl
< cpl
|| dpl
< rpl
)
3379 if (!(e2
& DESC_W_MASK
)) {
3381 CC_SRC
= eflags
& ~CC_Z
;
3385 CC_SRC
= eflags
| CC_Z
;
/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3409 #if !defined(CONFIG_USER_ONLY)
3416 void helper_flds_FT0(uint32_t val
)
3423 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3426 void helper_fldl_FT0(uint64_t val
)
3433 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3436 void helper_fildl_FT0(int32_t val
)
3438 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3441 void helper_flds_ST0(uint32_t val
)
3448 new_fpstt
= (env
->fpstt
- 1) & 7;
3450 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3451 env
->fpstt
= new_fpstt
;
3452 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3455 void helper_fldl_ST0(uint64_t val
)
3462 new_fpstt
= (env
->fpstt
- 1) & 7;
3464 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3465 env
->fpstt
= new_fpstt
;
3466 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3469 void helper_fildl_ST0(int32_t val
)
3472 new_fpstt
= (env
->fpstt
- 1) & 7;
3473 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3474 env
->fpstt
= new_fpstt
;
3475 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3478 void helper_fildll_ST0(int64_t val
)
3481 new_fpstt
= (env
->fpstt
- 1) & 7;
3482 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3483 env
->fpstt
= new_fpstt
;
3484 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3487 uint32_t helper_fsts_ST0(void)
3493 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3497 uint64_t helper_fstl_ST0(void)
3503 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3507 int32_t helper_fist_ST0(void)
3510 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3511 if (val
!= (int16_t)val
)
3516 int32_t helper_fistl_ST0(void)
3519 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3523 int64_t helper_fistll_ST0(void)
3526 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3530 int32_t helper_fistt_ST0(void)
3533 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3534 if (val
!= (int16_t)val
)
3539 int32_t helper_fisttl_ST0(void)
3542 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3546 int64_t helper_fisttll_ST0(void)
3549 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3553 void helper_fldt_ST0(target_ulong ptr
)
3556 new_fpstt
= (env
->fpstt
- 1) & 7;
3557 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3558 env
->fpstt
= new_fpstt
;
3559 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3562 void helper_fstt_ST0(target_ulong ptr
)
3564 helper_fstt(ST0
, ptr
);
3567 void helper_fpush(void)
3572 void helper_fpop(void)
3577 void helper_fdecstp(void)
3579 env
->fpstt
= (env
->fpstt
- 1) & 7;
3580 env
->fpus
&= (~0x4700);
3583 void helper_fincstp(void)
3585 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3586 env
->fpus
&= (~0x4700);
3591 void helper_ffree_STN(int st_index
)
3593 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3596 void helper_fmov_ST0_FT0(void)
3601 void helper_fmov_FT0_STN(int st_index
)
3606 void helper_fmov_ST0_STN(int st_index
)
3611 void helper_fmov_STN_ST0(int st_index
)
3616 void helper_fxchg_ST0_STN(int st_index
)
3624 /* FPU operations */
3626 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3628 void helper_fcom_ST0_FT0(void)
3632 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3633 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3636 void helper_fucom_ST0_FT0(void)
3640 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3641 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3644 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3646 void helper_fcomi_ST0_FT0(void)
3651 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3652 eflags
= helper_cc_compute_all(CC_OP
);
3653 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3657 void helper_fucomi_ST0_FT0(void)
3662 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3663 eflags
= helper_cc_compute_all(CC_OP
);
3664 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3668 void helper_fadd_ST0_FT0(void)
3673 void helper_fmul_ST0_FT0(void)
3678 void helper_fsub_ST0_FT0(void)
3683 void helper_fsubr_ST0_FT0(void)
3688 void helper_fdiv_ST0_FT0(void)
3690 ST0
= helper_fdiv(ST0
, FT0
);
3693 void helper_fdivr_ST0_FT0(void)
3695 ST0
= helper_fdiv(FT0
, ST0
);
3698 /* fp operations between STN and ST0 */
3700 void helper_fadd_STN_ST0(int st_index
)
3702 ST(st_index
) += ST0
;
3705 void helper_fmul_STN_ST0(int st_index
)
3707 ST(st_index
) *= ST0
;
3710 void helper_fsub_STN_ST0(int st_index
)
3712 ST(st_index
) -= ST0
;
3715 void helper_fsubr_STN_ST0(int st_index
)
3722 void helper_fdiv_STN_ST0(int st_index
)
3726 *p
= helper_fdiv(*p
, ST0
);
3729 void helper_fdivr_STN_ST0(int st_index
)
3733 *p
= helper_fdiv(ST0
, *p
);
3736 /* misc FPU operations */
3737 void helper_fchs_ST0(void)
3739 ST0
= floatx_chs(ST0
);
3742 void helper_fabs_ST0(void)
3744 ST0
= floatx_abs(ST0
);
3747 void helper_fld1_ST0(void)
3752 void helper_fldl2t_ST0(void)
3757 void helper_fldl2e_ST0(void)
3762 void helper_fldpi_ST0(void)
3767 void helper_fldlg2_ST0(void)
3772 void helper_fldln2_ST0(void)
3777 void helper_fldz_ST0(void)
3782 void helper_fldz_FT0(void)
3787 uint32_t helper_fnstsw(void)
3789 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3792 uint32_t helper_fnstcw(void)
3797 static void update_fp_status(void)
3801 /* set rounding mode */
3802 switch(env
->fpuc
& RC_MASK
) {
3805 rnd_type
= float_round_nearest_even
;
3808 rnd_type
= float_round_down
;
3811 rnd_type
= float_round_up
;
3814 rnd_type
= float_round_to_zero
;
3817 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3819 switch((env
->fpuc
>> 8) & 3) {
3831 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3835 void helper_fldcw(uint32_t val
)
3841 void helper_fclex(void)
3843 env
->fpus
&= 0x7f00;
3846 void helper_fwait(void)
3848 if (env
->fpus
& FPUS_SE
)
3849 fpu_raise_exception();
3852 void helper_fninit(void)
3869 void helper_fbld_ST0(target_ulong ptr
)
3877 for(i
= 8; i
>= 0; i
--) {
3879 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3882 if (ldub(ptr
+ 9) & 0x80)
3888 void helper_fbst_ST0(target_ulong ptr
)
3891 target_ulong mem_ref
, mem_end
;
3894 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3896 mem_end
= mem_ref
+ 9;
3903 while (mem_ref
< mem_end
) {
3908 v
= ((v
/ 10) << 4) | (v
% 10);
3911 while (mem_ref
< mem_end
) {
3916 void helper_f2xm1(void)
3918 ST0
= pow(2.0,ST0
) - 1.0;
3921 void helper_fyl2x(void)
3923 CPU86_LDouble fptemp
;
3927 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3931 env
->fpus
&= (~0x4700);
3936 void helper_fptan(void)
3938 CPU86_LDouble fptemp
;
3941 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3947 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3948 /* the above code is for |arg| < 2**52 only */
3952 void helper_fpatan(void)
3954 CPU86_LDouble fptemp
, fpsrcop
;
3958 ST1
= atan2(fpsrcop
,fptemp
);
3962 void helper_fxtract(void)
3964 CPU86_LDoubleU temp
;
3965 unsigned int expdif
;
3968 expdif
= EXPD(temp
) - EXPBIAS
;
3969 /*DP exponent bias*/
3976 void helper_fprem1(void)
3978 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3979 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3981 signed long long int q
;
3983 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3984 ST0
= 0.0 / 0.0; /* NaN */
3985 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3991 fpsrcop1
.d
= fpsrcop
;
3993 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3996 /* optimisation? taken from the AMD docs */
3997 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3998 /* ST0 is unchanged */
4003 dblq
= fpsrcop
/ fptemp
;
4004 /* round dblq towards nearest integer */
4006 ST0
= fpsrcop
- fptemp
* dblq
;
4008 /* convert dblq to q by truncating towards zero */
4010 q
= (signed long long int)(-dblq
);
4012 q
= (signed long long int)dblq
;
4014 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4015 /* (C0,C3,C1) <-- (q2,q1,q0) */
4016 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4017 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4018 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4020 env
->fpus
|= 0x400; /* C2 <-- 1 */
4021 fptemp
= pow(2.0, expdif
- 50);
4022 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4023 /* fpsrcop = integer obtained by chopping */
4024 fpsrcop
= (fpsrcop
< 0.0) ?
4025 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4026 ST0
-= (ST1
* fpsrcop
* fptemp
);
4030 void helper_fprem(void)
4032 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4033 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4035 signed long long int q
;
4037 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4038 ST0
= 0.0 / 0.0; /* NaN */
4039 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4043 fpsrcop
= (CPU86_LDouble
)ST0
;
4044 fptemp
= (CPU86_LDouble
)ST1
;
4045 fpsrcop1
.d
= fpsrcop
;
4047 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4050 /* optimisation? taken from the AMD docs */
4051 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4052 /* ST0 is unchanged */
4056 if ( expdif
< 53 ) {
4057 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4058 /* round dblq towards zero */
4059 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4060 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4062 /* convert dblq to q by truncating towards zero */
4064 q
= (signed long long int)(-dblq
);
4066 q
= (signed long long int)dblq
;
4068 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4069 /* (C0,C3,C1) <-- (q2,q1,q0) */
4070 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4071 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4072 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4074 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4075 env
->fpus
|= 0x400; /* C2 <-- 1 */
4076 fptemp
= pow(2.0, (double)(expdif
- N
));
4077 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4078 /* fpsrcop = integer obtained by chopping */
4079 fpsrcop
= (fpsrcop
< 0.0) ?
4080 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4081 ST0
-= (ST1
* fpsrcop
* fptemp
);
4085 void helper_fyl2xp1(void)
4087 CPU86_LDouble fptemp
;
4090 if ((fptemp
+1.0)>0.0) {
4091 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4095 env
->fpus
&= (~0x4700);
4100 void helper_fsqrt(void)
4102 CPU86_LDouble fptemp
;
4106 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4112 void helper_fsincos(void)
4114 CPU86_LDouble fptemp
;
4117 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4123 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4124 /* the above code is for |arg| < 2**63 only */
4128 void helper_frndint(void)
4130 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4133 void helper_fscale(void)
4135 ST0
= ldexp (ST0
, (int)(ST1
));
4138 void helper_fsin(void)
4140 CPU86_LDouble fptemp
;
4143 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4147 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4148 /* the above code is for |arg| < 2**53 only */
4152 void helper_fcos(void)
4154 CPU86_LDouble fptemp
;
4157 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4161 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4162 /* the above code is for |arg5 < 2**63 only */
4166 void helper_fxam_ST0(void)
4168 CPU86_LDoubleU temp
;
4173 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4175 env
->fpus
|= 0x200; /* C1 <-- 1 */
4177 /* XXX: test fptags too */
4178 expdif
= EXPD(temp
);
4179 if (expdif
== MAXEXPD
) {
4180 #ifdef USE_X86LDOUBLE
4181 if (MANTD(temp
) == 0x8000000000000000ULL
)
4183 if (MANTD(temp
) == 0)
4185 env
->fpus
|= 0x500 /*Infinity*/;
4187 env
->fpus
|= 0x100 /*NaN*/;
4188 } else if (expdif
== 0) {
4189 if (MANTD(temp
) == 0)
4190 env
->fpus
|= 0x4000 /*Zero*/;
4192 env
->fpus
|= 0x4400 /*Denormal*/;
4198 void helper_fstenv(target_ulong ptr
, int data32
)
4200 int fpus
, fptag
, exp
, i
;
4204 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4206 for (i
=7; i
>=0; i
--) {
4208 if (env
->fptags
[i
]) {
4211 tmp
.d
= env
->fpregs
[i
].d
;
4214 if (exp
== 0 && mant
== 0) {
4217 } else if (exp
== 0 || exp
== MAXEXPD
4218 #ifdef USE_X86LDOUBLE
4219 || (mant
& (1LL << 63)) == 0
4222 /* NaNs, infinity, denormal */
4229 stl(ptr
, env
->fpuc
);
4231 stl(ptr
+ 8, fptag
);
4232 stl(ptr
+ 12, 0); /* fpip */
4233 stl(ptr
+ 16, 0); /* fpcs */
4234 stl(ptr
+ 20, 0); /* fpoo */
4235 stl(ptr
+ 24, 0); /* fpos */
4238 stw(ptr
, env
->fpuc
);
4240 stw(ptr
+ 4, fptag
);
4248 void helper_fldenv(target_ulong ptr
, int data32
)
4253 env
->fpuc
= lduw(ptr
);
4254 fpus
= lduw(ptr
+ 4);
4255 fptag
= lduw(ptr
+ 8);
4258 env
->fpuc
= lduw(ptr
);
4259 fpus
= lduw(ptr
+ 2);
4260 fptag
= lduw(ptr
+ 4);
4262 env
->fpstt
= (fpus
>> 11) & 7;
4263 env
->fpus
= fpus
& ~0x3800;
4264 for(i
= 0;i
< 8; i
++) {
4265 env
->fptags
[i
] = ((fptag
& 3) == 3);
4270 void helper_fsave(target_ulong ptr
, int data32
)
4275 helper_fstenv(ptr
, data32
);
4277 ptr
+= (14 << data32
);
4278 for(i
= 0;i
< 8; i
++) {
4280 helper_fstt(tmp
, ptr
);
4298 void helper_frstor(target_ulong ptr
, int data32
)
4303 helper_fldenv(ptr
, data32
);
4304 ptr
+= (14 << data32
);
4306 for(i
= 0;i
< 8; i
++) {
4307 tmp
= helper_fldt(ptr
);
4313 void helper_fxsave(target_ulong ptr
, int data64
)
4315 int fpus
, fptag
, i
, nb_xmm_regs
;
4319 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4321 for(i
= 0; i
< 8; i
++) {
4322 fptag
|= (env
->fptags
[i
] << i
);
4324 stw(ptr
, env
->fpuc
);
4326 stw(ptr
+ 4, fptag
^ 0xff);
4327 #ifdef TARGET_X86_64
4329 stq(ptr
+ 0x08, 0); /* rip */
4330 stq(ptr
+ 0x10, 0); /* rdp */
4334 stl(ptr
+ 0x08, 0); /* eip */
4335 stl(ptr
+ 0x0c, 0); /* sel */
4336 stl(ptr
+ 0x10, 0); /* dp */
4337 stl(ptr
+ 0x14, 0); /* sel */
4341 for(i
= 0;i
< 8; i
++) {
4343 helper_fstt(tmp
, addr
);
4347 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4348 /* XXX: finish it */
4349 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4350 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4351 if (env
->hflags
& HF_CS64_MASK
)
4356 /* Fast FXSAVE leaves out the XMM registers */
4357 if (!(env
->efer
& MSR_EFER_FFXSR
)
4358 || (env
->hflags
& HF_CPL_MASK
)
4359 || !(env
->hflags
& HF_LMA_MASK
)) {
4360 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4361 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4362 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4369 void helper_fxrstor(target_ulong ptr
, int data64
)
4371 int i
, fpus
, fptag
, nb_xmm_regs
;
4375 env
->fpuc
= lduw(ptr
);
4376 fpus
= lduw(ptr
+ 2);
4377 fptag
= lduw(ptr
+ 4);
4378 env
->fpstt
= (fpus
>> 11) & 7;
4379 env
->fpus
= fpus
& ~0x3800;
4381 for(i
= 0;i
< 8; i
++) {
4382 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4386 for(i
= 0;i
< 8; i
++) {
4387 tmp
= helper_fldt(addr
);
4392 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4393 /* XXX: finish it */
4394 env
->mxcsr
= ldl(ptr
+ 0x18);
4396 if (env
->hflags
& HF_CS64_MASK
)
4401 /* Fast FXRESTORE leaves out the XMM registers */
4402 if (!(env
->efer
& MSR_EFER_FFXSR
)
4403 || (env
->hflags
& HF_CPL_MASK
)
4404 || !(env
->hflags
& HF_LMA_MASK
)) {
4405 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4406 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4407 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4414 #ifndef USE_X86LDOUBLE
4416 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4418 CPU86_LDoubleU temp
;
4423 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4424 /* exponent + sign */
4425 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4426 e
|= SIGND(temp
) >> 16;
4430 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4432 CPU86_LDoubleU temp
;
4436 /* XXX: handle overflow ? */
4437 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4438 e
|= (upper
>> 4) & 0x800; /* sign */
4439 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4441 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4444 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4451 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4453 CPU86_LDoubleU temp
;
4456 *pmant
= temp
.l
.lower
;
4457 *pexp
= temp
.l
.upper
;
4460 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4462 CPU86_LDoubleU temp
;
4464 temp
.l
.upper
= upper
;
4465 temp
.l
.lower
= mant
;
4470 #ifdef TARGET_X86_64
4472 //#define DEBUG_MULDIV
4474 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4483 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4487 add128(plow
, phigh
, 1, 0);
4490 /* return TRUE if overflow */
4491 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4493 uint64_t q
, r
, a1
, a0
;
4506 /* XXX: use a better algorithm */
4507 for(i
= 0; i
< 64; i
++) {
4509 a1
= (a1
<< 1) | (a0
>> 63);
4510 if (ab
|| a1
>= b
) {
4516 a0
= (a0
<< 1) | qb
;
4518 #if defined(DEBUG_MULDIV)
4519 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4520 *phigh
, *plow
, b
, a0
, a1
);
4528 /* return TRUE if overflow */
4529 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4532 sa
= ((int64_t)*phigh
< 0);
4534 neg128(plow
, phigh
);
4538 if (div64(plow
, phigh
, b
) != 0)
4541 if (*plow
> (1ULL << 63))
4545 if (*plow
>= (1ULL << 63))
4553 void helper_mulq_EAX_T0(target_ulong t0
)
4557 mulu64(&r0
, &r1
, EAX
, t0
);
4564 void helper_imulq_EAX_T0(target_ulong t0
)
4568 muls64(&r0
, &r1
, EAX
, t0
);
4572 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4575 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4579 muls64(&r0
, &r1
, t0
, t1
);
4581 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4585 void helper_divq_EAX(target_ulong t0
)
4589 raise_exception(EXCP00_DIVZ
);
4593 if (div64(&r0
, &r1
, t0
))
4594 raise_exception(EXCP00_DIVZ
);
4599 void helper_idivq_EAX(target_ulong t0
)
4603 raise_exception(EXCP00_DIVZ
);
4607 if (idiv64(&r0
, &r1
, t0
))
4608 raise_exception(EXCP00_DIVZ
);
4614 static void do_hlt(void)
4616 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4618 env
->exception_index
= EXCP_HLT
;
4622 void helper_hlt(int next_eip_addend
)
4624 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4625 EIP
+= next_eip_addend
;
4630 void helper_monitor(target_ulong ptr
)
4632 if ((uint32_t)ECX
!= 0)
4633 raise_exception(EXCP0D_GPF
);
4634 /* XXX: store address ? */
4635 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4638 void helper_mwait(int next_eip_addend
)
4640 if ((uint32_t)ECX
!= 0)
4641 raise_exception(EXCP0D_GPF
);
4642 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4643 EIP
+= next_eip_addend
;
4645 /* XXX: not complete but not completely erroneous */
4646 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4647 /* more than one CPU: do not sleep because another CPU may
4654 void helper_debug(void)
4656 env
->exception_index
= EXCP_DEBUG
;
4660 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4662 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4665 void helper_raise_exception(int exception_index
)
4667 raise_exception(exception_index
);
4670 void helper_cli(void)
4672 env
->eflags
&= ~IF_MASK
;
4675 void helper_sti(void)
4677 env
->eflags
|= IF_MASK
;
4681 /* vm86plus instructions */
4682 void helper_cli_vm(void)
4684 env
->eflags
&= ~VIF_MASK
;
4687 void helper_sti_vm(void)
4689 env
->eflags
|= VIF_MASK
;
4690 if (env
->eflags
& VIP_MASK
) {
4691 raise_exception(EXCP0D_GPF
);
4696 void helper_set_inhibit_irq(void)
4698 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4701 void helper_reset_inhibit_irq(void)
4703 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4706 void helper_boundw(target_ulong a0
, int v
)
4710 high
= ldsw(a0
+ 2);
4712 if (v
< low
|| v
> high
) {
4713 raise_exception(EXCP05_BOUND
);
4717 void helper_boundl(target_ulong a0
, int v
)
4722 if (v
< low
|| v
> high
) {
4723 raise_exception(EXCP05_BOUND
);
4727 static float approx_rsqrt(float a
)
4729 return 1.0 / sqrt(a
);
4732 static float approx_rcp(float a
)
4737 #if !defined(CONFIG_USER_ONLY)
4739 #define MMUSUFFIX _mmu
4742 #include "softmmu_template.h"
4745 #include "softmmu_template.h"
4748 #include "softmmu_template.h"
4751 #include "softmmu_template.h"
4755 #if !defined(CONFIG_USER_ONLY)
4756 /* try to fill the TLB and return an exception if error. If retaddr is
4757 NULL, it means that the function was called in C code (i.e. not
4758 from generated code or from helper.c) */
4759 /* XXX: fix it to restore all registers */
4760 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4762 TranslationBlock
*tb
;
4765 CPUX86State
*saved_env
;
4767 /* XXX: hack to restore env in all cases, even if not called from
4770 env
= cpu_single_env
;
4772 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4775 /* now we have a real cpu fault */
4776 pc
= (unsigned long)retaddr
;
4777 tb
= tb_find_pc(pc
);
4779 /* the PC is inside the translated code. It means that we have
4780 a virtual CPU fault */
4781 cpu_restore_state(tb
, env
, pc
, NULL
);
4784 raise_exception_err(env
->exception_index
, env
->error_code
);
/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
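/* NOTE: the VMCB stores segment attributes in the packed SVM format
   (bits 7:0 and 11:8 of the descriptor flags), hence the shift/mask
   conversions in svm_save_seg()/svm_load_seg() between that layout and
   the CPUState segment flags. */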
4864 void helper_vmrun(int aflag
, int next_eip_addend
)
4870 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4875 addr
= (uint32_t)EAX
;
4877 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4879 env
->vm_vmcb
= addr
;
4881 /* save the current CPU state in the hsave page */
4882 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4883 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4885 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4886 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4888 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4889 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4890 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4891 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4892 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4893 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4895 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4896 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4898 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4900 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4902 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4904 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4907 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4908 EIP
+ next_eip_addend
);
4909 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4910 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4912 /* load the interception bitmaps so we do not need to access the
4914 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4915 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4916 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4917 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4918 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4919 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4921 /* enable intercepts */
4922 env
->hflags
|= HF_SVMI_MASK
;
4924 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4926 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4927 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4929 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4930 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4932 /* clear exit_info_2 so we behave like the real hardware */
4933 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4935 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4936 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4937 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4938 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4939 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4940 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4941 if (int_ctl
& V_INTR_MASKING_MASK
) {
4942 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4943 env
->hflags2
|= HF2_VINTR_MASK
;
4944 if (env
->eflags
& IF_MASK
)
4945 env
->hflags2
|= HF2_HIF_MASK
;
4949 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4951 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4952 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4953 CC_OP
= CC_OP_EFLAGS
;
4955 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4957 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4959 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4961 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4964 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4966 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4967 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4968 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4969 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4970 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4972 /* FIXME: guest state consistency checks */
4974 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4975 case TLB_CONTROL_DO_NOTHING
:
4977 case TLB_CONTROL_FLUSH_ALL_ASID
:
4978 /* FIXME: this is not 100% correct but should work for now */
4983 env
->hflags2
|= HF2_GIF_MASK
;
4985 if (int_ctl
& V_IRQ_MASK
) {
4986 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4989 /* maybe we need to inject an event */
4990 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4991 if (event_inj
& SVM_EVTINJ_VALID
) {
4992 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4993 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4994 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4995 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4997 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
4998 /* FIXME: need to implement valid_err */
4999 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5000 case SVM_EVTINJ_TYPE_INTR
:
5001 env
->exception_index
= vector
;
5002 env
->error_code
= event_inj_err
;
5003 env
->exception_is_int
= 0;
5004 env
->exception_next_eip
= -1;
5005 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5006 /* XXX: is it always correct ? */
5007 do_interrupt(vector
, 0, 0, 0, 1);
5009 case SVM_EVTINJ_TYPE_NMI
:
5010 env
->exception_index
= EXCP02_NMI
;
5011 env
->error_code
= event_inj_err
;
5012 env
->exception_is_int
= 0;
5013 env
->exception_next_eip
= EIP
;
5014 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5017 case SVM_EVTINJ_TYPE_EXEPT
:
5018 env
->exception_index
= vector
;
5019 env
->error_code
= event_inj_err
;
5020 env
->exception_is_int
= 0;
5021 env
->exception_next_eip
= -1;
5022 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5025 case SVM_EVTINJ_TYPE_SOFT
:
5026 env
->exception_index
= vector
;
5027 env
->error_code
= event_inj_err
;
5028 env
->exception_is_int
= 1;
5029 env
->exception_next_eip
= EIP
;
5030 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5034 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5038 void helper_vmmcall(void)
5040 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5041 raise_exception(EXCP06_ILLOP
);
5044 void helper_vmload(int aflag
)
5047 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
5052 addr
= (uint32_t)EAX
;
5054 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5055 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5056 env
->segs
[R_FS
].base
);
5058 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
5060 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
5062 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5064 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5067 #ifdef TARGET_X86_64
5068 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5069 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5070 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5071 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5073 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5074 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5075 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5076 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5079 void helper_vmsave(int aflag
)
5082 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5087 addr
= (uint32_t)EAX
;
5089 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5090 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5091 env
->segs
[R_FS
].base
);
5093 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5095 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5097 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5099 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5102 #ifdef TARGET_X86_64
5103 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5104 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5105 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5106 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5108 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5109 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5110 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5111 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5114 void helper_stgi(void)
5116 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5117 env
->hflags2
|= HF2_GIF_MASK
;
5120 void helper_clgi(void)
5122 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5123 env
->hflags2
&= ~HF2_GIF_MASK
;
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
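/* The architectural INVLPGA takes the linear address in rAX and an ASID
   in ECX.  This TLB model does not tag entries by ASID, so the ECX value
   is ignored and the page is simply flushed for the current context. */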
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
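/* The MSR permission map (msrpm_base_pa) reserves two bits per MSR, one
   for reads and one for writes, across three architectural MSR ranges
   (0..0x1fff, 0xc0000000..0xc0001fff, 0xc0010000..0xc0011fff); here param
   selects which bit is tested (0 = read, 1 = write).  Worked example,
   following the index computation above: for ECX = 0xc0000080 (EFER),
   t0 = (8192 + 0x80) * 2 = 16640, so t1 = 2080 and t0 %= 8 gives 0,
   i.e. bit 0 of byte 2080 gates RDMSR and bit 1 gates WRMSR of EFER. */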
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
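/* The I/O permission map (iopm_base_pa) holds one intercept bit per port.
   Bits 6:4 of param carry the access size in bytes (per the SVM IOIO
   exit-info format), so mask covers every byte the access touches, and
   the 16-bit lduw_phys keeps the test correct when the access straddles a
   byte boundary.  Worked example: a 2-byte access to port 0x3f9 gives
   mask = 3 and tests bits 1-2 of the word at byte offset 0x3f9 / 8 = 127;
   if either bit is set, exit_info_2 records the return address and
   #VMEXIT is raised with the port number folded into exit_info_1. */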
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                  EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
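/* helper_vmexit mirrors the #VMEXIT pseudocode of the SVM architecture
   manual: the guest state is written back to the guest VMCB (vm_vmcb),
   the host state saved at VMRUN time is restored from vm_hsave, the exit
   code and exit_info_1 are recorded, GIF is cleared and any pending
   exception is dropped before cpu_loop_exit() returns to the main
   execution loop, which then resumes at the host RIP loaded above. */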
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
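/* In this CPU state, fptags[i] is one byte per x87 register: 0 means
   "valid", 1 means "empty".  Storing 0 to both 32-bit halves therefore
   tags all eight registers as valid (MMX aliases them), while 0x01010101
   twice marks them all empty again, i.e. the equivalent of
   memset(env->fptags, 1, 8) done as two word stores. */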
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
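/* These scan linearly, one bit per iteration: e.g. helper_bsf(0x28)
   returns 3 (index of the lowest set bit) and helper_bsr(0x28) returns 5
   (index of the highest set bit).  Both loops assume t0 has at least one
   bit set; the translator is presumably expected to handle the zero
   operand case itself, where BSF/BSR leave the destination undefined and
   only ZF matters. */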
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}
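/* Lazy condition-code evaluation: rather than updating EFLAGS after every
   instruction, the translated code records the last flag-setting
   operation in CC_OP together with its operands/result in CC_SRC/CC_DST,
   and these helpers reconstruct the flags only when they are actually
   read.  For example, after an 8-bit ADD the state is CC_OP = CC_OP_ADDB,
   CC_DST = result, CC_SRC = one operand, and compute_c_addb() (generated
   from helper_template.h above) recovers CF as
   (uint8_t)CC_DST < (uint8_t)CC_SRC; helper_cc_compute_all() rebuilds the
   full flag set the same way. */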