/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS

#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
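/* parity_table[v] holds CC_P when the byte value v contains an even
   number of set bits, and 0 otherwise; PF is always computed from the
   low 8 bits of a result (see helper_daa()/helper_das() below). */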
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
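/* RCL/RCR rotate through CF, so the effective rotate count wraps at
   operand width + 1: modulo 17 for 16-bit operands (rclw_table) and
   modulo 9 for 8-bit operands (rclb_table), indexed by the masked
   5-bit count. */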
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
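/* x87 constants 0.0, 1.0, pi, log10(2), ln(2), log2(e) and log2(10),
   in the order used by the FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T
   constant-load helpers. */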
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
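/* Layout note: in a 32-bit TSS the ring-n stack pointers live at
   offset 4 + 8 * n (ESPn) and 8 + 8 * n (SSn); in a 16-bit TSS they
   live at 2 + 4 * n (SPn) and 4 + 4 * n (SSn).  The expression
   (dpl * 4 + 2) << shift above selects exactly those offsets. */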
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
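/* Offsets used below when reading/writing a 32-bit TSS image:
   0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28..0x44 EAX..EDI,
   0x48..0x5c the six segment selectors (ES, CS, SS, DS, FS, GS),
   0x60 LDT selector, 0x64 T flag and 0x66 I/O map base. */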
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */

    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
    }
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
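/* check_io() above consults the TSS I/O permission bitmap: the bitmap
   starts at the 16-bit offset stored at TSS offset 0x66 and holds one
   bit per port (1 = access denied).  Two bytes are read so that an
   access spanning a byte boundary is still covered by the test. */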
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
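/* Exceptions that push an error code: #DF(8), #TS(10), #NP(11),
   #SS(12), #GP(13), #PF(14) and #AC(17). */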
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
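/* Stack frame built above for a protected-mode interrupt: when coming
   from vm86 to an inner privilege level, GS, FS, DS and ES are pushed
   first; on any stack switch SS and ESP follow; EFLAGS, CS and EIP are
   always pushed, plus the error code when the exception defines one. */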
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif /* TARGET_X86_64 */
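/* get_rsp_from_tss() reads the 64-bit TSS, where RSP0..RSP2 are stored
   at offsets 4, 12 and 20 and IST1..IST7 start at offset 36; callers
   pass either the target DPL or ist + 3, which "8 * level + 4" maps to
   those offsets. */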
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
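/* SYSCALL register usage: the kernel CS selector comes from
   STAR[47:32] (SS is CS + 8), the 64-bit entry point from LSTAR and
   the compatibility-mode entry point from CSTAR; SFMASK (env->fmask)
   lists the RFLAGS bits cleared on entry, while RCX and R11 receive
   the return RIP and RFLAGS.  Legacy mode jumps to STAR[31:0]. */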
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif
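/* SYSRET mirrors SYSCALL: the user segment base comes from STAR[63:48];
   a 64-bit return (dflag == 2) loads CS from selector + 16, otherwise
   from selector, SS always from selector + 8, and the CPL becomes 3. */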
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
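/* Double/triple fault rules implemented above: exceptions 0 and 10-13
   are "contributory"; a contributory fault raised while handling another
   contributory fault, or any contributory/page fault raised while
   handling a page fault, becomes #DF, and a further fault while handling
   #DF shuts the machine down (triple fault). */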
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
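/* The SMM state save area is the 32KB block at smbase + 0x8000; the
   offsets below are relative to that base (sm_state).  Bit 17 of the
   revision ID field advertises SMBASE relocation, which is why
   helper_rsm() only reloads env->smbase when that bit is set. */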
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
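/* DAA/DAS adjust AL after a packed-BCD add/subtract: 6 is added to or
   subtracted from the low nibble when it exceeds 9 or AF is set, and
   0x60 when the high digit overflowed or CF is set; ZF, PF and SF are
   then recomputed from the adjusted AL. */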
void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
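/*
 * Editor's note (illustrative sketch, not part of the original helper):
 * the PUSHW/PUSHL macros used above decrement a local copy of the stack
 * pointer and store at ssp + (sp & sp_mask), so with a 16-bit stack
 * segment (esp_mask == 0xffff) only the low 16 bits of ESP ever change.
 * An open-coded equivalent of a single 16-bit push, assuming the
 * stw_kernel() store helper used elsewhere in this file, would be:
 *
 *     esp -= 2;
 *     stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
 *
 * SET_ESP() then writes back only the bits selected by esp_mask.
 */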
2311 /* protected mode call */
2312 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2313 int shift
, int next_eip_addend
)
2316 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2317 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2318 uint32_t val
, limit
, old_sp_mask
;
2319 target_ulong ssp
, old_ssp
, next_eip
;
2321 next_eip
= env
->eip
+ next_eip_addend
;
2322 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2323 LOG_PCALL_STATE(env
);
2324 if ((new_cs
& 0xfffc) == 0)
2325 raise_exception_err(EXCP0D_GPF
, 0);
2326 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2327 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2328 cpl
= env
->hflags
& HF_CPL_MASK
;
2329 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2330 if (e2
& DESC_S_MASK
) {
2331 if (!(e2
& DESC_CS_MASK
))
2332 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2333 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2334 if (e2
& DESC_C_MASK
) {
2335 /* conforming code segment */
2337 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2339 /* non conforming code segment */
2342 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2344 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2346 if (!(e2
& DESC_P_MASK
))
2347 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2349 #ifdef TARGET_X86_64
2350 /* XXX: check 16/32 bit cases in long mode */
2355 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2356 PUSHQ(rsp
, next_eip
);
2357 /* from this point, not restartable */
2359 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2360 get_seg_base(e1
, e2
),
2361 get_seg_limit(e1
, e2
), e2
);
2367 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2368 ssp
= env
->segs
[R_SS
].base
;
2370 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2371 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2373 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2374 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2377 limit
= get_seg_limit(e1
, e2
);
2378 if (new_eip
> limit
)
2379 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2380 /* from this point, not restartable */
2381 SET_ESP(sp
, sp_mask
);
2382 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2383 get_seg_base(e1
, e2
), limit
, e2
);
2387 /* check gate type */
2388 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2389 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2392 case 1: /* available 286 TSS */
2393 case 9: /* available 386 TSS */
2394 case 5: /* task gate */
2395 if (dpl
< cpl
|| dpl
< rpl
)
2396 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2397 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2398 CC_OP
= CC_OP_EFLAGS
;
2400 case 4: /* 286 call gate */
2401 case 12: /* 386 call gate */
2404 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2409 if (dpl
< cpl
|| dpl
< rpl
)
2410 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2411 /* check valid bit */
2412 if (!(e2
& DESC_P_MASK
))
2413 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2414 selector
= e1
>> 16;
2415 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2416 param_count
= e2
& 0x1f;
2417 if ((selector
& 0xfffc) == 0)
2418 raise_exception_err(EXCP0D_GPF
, 0);
2420 if (load_segment(&e1
, &e2
, selector
) != 0)
2421 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2422 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2423 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2424 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2426 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2427 if (!(e2
& DESC_P_MASK
))
2428 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2430 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2431 /* to inner privilege */
2432 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2433 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2434 ss
, sp
, param_count
, ESP
);
2435 if ((ss
& 0xfffc) == 0)
2436 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2437 if ((ss
& 3) != dpl
)
2438 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2439 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2440 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2441 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2443 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2444 if (!(ss_e2
& DESC_S_MASK
) ||
2445 (ss_e2
& DESC_CS_MASK
) ||
2446 !(ss_e2
& DESC_W_MASK
))
2447 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2448 if (!(ss_e2
& DESC_P_MASK
))
2449 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2451 // push_size = ((param_count * 2) + 8) << shift;
2453 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2454 old_ssp
= env
->segs
[R_SS
].base
;
2456 sp_mask
= get_sp_mask(ss_e2
);
2457 ssp
= get_seg_base(ss_e1
, ss_e2
);
2459 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2460 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2461 for(i
= param_count
- 1; i
>= 0; i
--) {
2462 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2463 PUSHL(ssp
, sp
, sp_mask
, val
);
2466 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2467 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2468 for(i
= param_count
- 1; i
>= 0; i
--) {
2469 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2470 PUSHW(ssp
, sp
, sp_mask
, val
);
2475 /* to same privilege */
2477 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2478 ssp
= env
->segs
[R_SS
].base
;
2479 // push_size = (4 << shift);
2484 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2485 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2487 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2488 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2491 /* from this point, not restartable */
2494 ss
= (ss
& ~3) | dpl
;
2495 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2497 get_seg_limit(ss_e1
, ss_e2
),
2501 selector
= (selector
& ~3) | dpl
;
2502 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2503 get_seg_base(e1
, e2
),
2504 get_seg_limit(e1
, e2
),
2506 cpu_x86_set_cpl(env
, dpl
);
2507 SET_ESP(sp
, sp_mask
);
2511 if (kqemu_is_ok(env
)) {
2512 env
->exception_index
= -1;
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
2576 /* protected mode iret */
2577 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2579 uint32_t new_cs
, new_eflags
, new_ss
;
2580 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2581 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2582 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2583 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2585 #ifdef TARGET_X86_64
2590 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2592 ssp
= env
->segs
[R_SS
].base
;
2593 new_eflags
= 0; /* avoid warning */
2594 #ifdef TARGET_X86_64
2600 POPQ(sp
, new_eflags
);
2606 POPL(ssp
, sp
, sp_mask
, new_eip
);
2607 POPL(ssp
, sp
, sp_mask
, new_cs
);
2610 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2611 if (new_eflags
& VM_MASK
)
2612 goto return_to_vm86
;
2616 POPW(ssp
, sp
, sp_mask
, new_eip
);
2617 POPW(ssp
, sp
, sp_mask
, new_cs
);
2619 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2621 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2622 new_cs
, new_eip
, shift
, addend
);
2623 LOG_PCALL_STATE(env
);
2624 if ((new_cs
& 0xfffc) == 0)
2625 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2626 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2627 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2628 if (!(e2
& DESC_S_MASK
) ||
2629 !(e2
& DESC_CS_MASK
))
2630 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2631 cpl
= env
->hflags
& HF_CPL_MASK
;
2634 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2635 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2636 if (e2
& DESC_C_MASK
) {
2638 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2641 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2643 if (!(e2
& DESC_P_MASK
))
2644 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2647 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2648 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2649 /* return to same privilege level */
2650 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2651 get_seg_base(e1
, e2
),
2652 get_seg_limit(e1
, e2
),
2655 /* return to different privilege level */
2656 #ifdef TARGET_X86_64
2665 POPL(ssp
, sp
, sp_mask
, new_esp
);
2666 POPL(ssp
, sp
, sp_mask
, new_ss
);
2670 POPW(ssp
, sp
, sp_mask
, new_esp
);
2671 POPW(ssp
, sp
, sp_mask
, new_ss
);
2673 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2675 if ((new_ss
& 0xfffc) == 0) {
2676 #ifdef TARGET_X86_64
2677 /* NULL ss is allowed in long mode if cpl != 3*/
2678 /* XXX: test CS64 ? */
2679 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2680 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2682 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2683 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2684 DESC_W_MASK
| DESC_A_MASK
);
2685 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2689 raise_exception_err(EXCP0D_GPF
, 0);
2692 if ((new_ss
& 3) != rpl
)
2693 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2694 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2695 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2696 if (!(ss_e2
& DESC_S_MASK
) ||
2697 (ss_e2
& DESC_CS_MASK
) ||
2698 !(ss_e2
& DESC_W_MASK
))
2699 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2700 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2702 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2703 if (!(ss_e2
& DESC_P_MASK
))
2704 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2705 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2706 get_seg_base(ss_e1
, ss_e2
),
2707 get_seg_limit(ss_e1
, ss_e2
),
2711 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2712 get_seg_base(e1
, e2
),
2713 get_seg_limit(e1
, e2
),
2715 cpu_x86_set_cpl(env
, rpl
);
2717 #ifdef TARGET_X86_64
2718 if (env
->hflags
& HF_CS64_MASK
)
2722 sp_mask
= get_sp_mask(ss_e2
);
2724 /* validate data segments */
2725 validate_seg(R_ES
, rpl
);
2726 validate_seg(R_DS
, rpl
);
2727 validate_seg(R_FS
, rpl
);
2728 validate_seg(R_GS
, rpl
);
2732 SET_ESP(sp
, sp_mask
);
2735 /* NOTE: 'cpl' is the _old_ CPL */
2736 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2738 eflags_mask
|= IOPL_MASK
;
2739 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2741 eflags_mask
|= IF_MASK
;
2743 eflags_mask
&= 0xffff;
2744 load_eflags(new_eflags
, eflags_mask
);
2749 POPL(ssp
, sp
, sp_mask
, new_esp
);
2750 POPL(ssp
, sp
, sp_mask
, new_ss
);
2751 POPL(ssp
, sp
, sp_mask
, new_es
);
2752 POPL(ssp
, sp
, sp_mask
, new_ds
);
2753 POPL(ssp
, sp
, sp_mask
, new_fs
);
2754 POPL(ssp
, sp
, sp_mask
, new_gs
);
2756 /* modify processor state */
2757 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2758 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2759 load_seg_vm(R_CS
, new_cs
& 0xffff);
2760 cpu_x86_set_cpl(env
, 3);
2761 load_seg_vm(R_SS
, new_ss
& 0xffff);
2762 load_seg_vm(R_ES
, new_es
& 0xffff);
2763 load_seg_vm(R_DS
, new_ds
& 0xffff);
2764 load_seg_vm(R_FS
, new_fs
& 0xffff);
2765 load_seg_vm(R_GS
, new_gs
& 0xffff);
    env->eip = new_eip & 0xffff;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2894 #if defined(CONFIG_USER_ONLY)
2895 target_ulong
helper_read_crN(int reg
)
2900 void helper_write_crN(int reg
, target_ulong t0
)
2904 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2908 target_ulong
helper_read_crN(int reg
)
2912 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2918 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2919 val
= cpu_get_apic_tpr(env
);
2928 void helper_write_crN(int reg
, target_ulong t0
)
2930 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2933 cpu_x86_update_cr0(env
, t0
);
2936 cpu_x86_update_cr3(env
, t0
);
2939 cpu_x86_update_cr4(env
, t0
);
2942 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2943 cpu_set_apic_tpr(env
, t0
);
2945 env
->v_tpr
= t0
& 0x0f;
2953 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2958 hw_breakpoint_remove(env
, reg
);
2960 hw_breakpoint_insert(env
, reg
);
2961 } else if (reg
== 7) {
2962 for (i
= 0; i
< 4; i
++)
2963 hw_breakpoint_remove(env
, i
);
2965 for (i
= 0; i
< 4; i
++)
2966 hw_breakpoint_insert(env
, i
);
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
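/*
 * Editor's note: worked example of the merge above (illustrative only).
 * With CR0 = 0x80000011 and an LMSW operand of 0xfff2:
 *   (0x80000011 & ~0xe) | (0xfff2 & 0xf) == 0x80000011 | 0x2 == 0x80000013
 * i.e. MP is taken from the operand while PE (bit 0) stays set because the
 * ~0xe mask preserves it, and all bits above the low MSW nibble are kept.
 */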
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3017 #if defined(CONFIG_USER_ONLY)
3018 void helper_wrmsr(void)
3022 void helper_rdmsr(void)
3026 void helper_wrmsr(void)
3030 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3032 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3034 switch((uint32_t)ECX
) {
3035 case MSR_IA32_SYSENTER_CS
:
3036 env
->sysenter_cs
= val
& 0xffff;
3038 case MSR_IA32_SYSENTER_ESP
:
3039 env
->sysenter_esp
= val
;
3041 case MSR_IA32_SYSENTER_EIP
:
3042 env
->sysenter_eip
= val
;
3044 case MSR_IA32_APICBASE
:
3045 cpu_set_apic_base(env
, val
);
3049 uint64_t update_mask
;
3051 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3052 update_mask
|= MSR_EFER_SCE
;
3053 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3054 update_mask
|= MSR_EFER_LME
;
3055 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3056 update_mask
|= MSR_EFER_FFXSR
;
3057 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3058 update_mask
|= MSR_EFER_NXE
;
3059 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3060 update_mask
|= MSR_EFER_SVME
;
3061 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3062 update_mask
|= MSR_EFER_FFXSR
;
3063 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3064 (val
& update_mask
));
3073 case MSR_VM_HSAVE_PA
:
3074 env
->vm_hsave
= val
;
3076 #ifdef TARGET_X86_64
3087 env
->segs
[R_FS
].base
= val
;
3090 env
->segs
[R_GS
].base
= val
;
3092 case MSR_KERNELGSBASE
:
3093 env
->kernelgsbase
= val
;
3096 case MSR_MTRRphysBase(0):
3097 case MSR_MTRRphysBase(1):
3098 case MSR_MTRRphysBase(2):
3099 case MSR_MTRRphysBase(3):
3100 case MSR_MTRRphysBase(4):
3101 case MSR_MTRRphysBase(5):
3102 case MSR_MTRRphysBase(6):
3103 case MSR_MTRRphysBase(7):
3104 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3106 case MSR_MTRRphysMask(0):
3107 case MSR_MTRRphysMask(1):
3108 case MSR_MTRRphysMask(2):
3109 case MSR_MTRRphysMask(3):
3110 case MSR_MTRRphysMask(4):
3111 case MSR_MTRRphysMask(5):
3112 case MSR_MTRRphysMask(6):
3113 case MSR_MTRRphysMask(7):
3114 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3116 case MSR_MTRRfix64K_00000
:
3117 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3119 case MSR_MTRRfix16K_80000
:
3120 case MSR_MTRRfix16K_A0000
:
3121 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3123 case MSR_MTRRfix4K_C0000
:
3124 case MSR_MTRRfix4K_C8000
:
3125 case MSR_MTRRfix4K_D0000
:
3126 case MSR_MTRRfix4K_D8000
:
3127 case MSR_MTRRfix4K_E0000
:
3128 case MSR_MTRRfix4K_E8000
:
3129 case MSR_MTRRfix4K_F0000
:
3130 case MSR_MTRRfix4K_F8000
:
3131 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3133 case MSR_MTRRdefType
:
3134 env
->mtrr_deftype
= val
;
3137 /* XXX: exception ? */
3142 void helper_rdmsr(void)
3146 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3148 switch((uint32_t)ECX
) {
3149 case MSR_IA32_SYSENTER_CS
:
3150 val
= env
->sysenter_cs
;
3152 case MSR_IA32_SYSENTER_ESP
:
3153 val
= env
->sysenter_esp
;
3155 case MSR_IA32_SYSENTER_EIP
:
3156 val
= env
->sysenter_eip
;
3158 case MSR_IA32_APICBASE
:
3159 val
= cpu_get_apic_base(env
);
3170 case MSR_VM_HSAVE_PA
:
3171 val
= env
->vm_hsave
;
3173 case MSR_IA32_PERF_STATUS
:
3174 /* tsc_increment_by_tick */
3176 /* CPU multiplier */
3177 val
|= (((uint64_t)4ULL) << 40);
3179 #ifdef TARGET_X86_64
3190 val
= env
->segs
[R_FS
].base
;
3193 val
= env
->segs
[R_GS
].base
;
3195 case MSR_KERNELGSBASE
:
3196 val
= env
->kernelgsbase
;
3200 case MSR_QPI_COMMBASE
:
3201 if (env
->kqemu_enabled
) {
3202 val
= kqemu_comm_base
;
3208 case MSR_MTRRphysBase(0):
3209 case MSR_MTRRphysBase(1):
3210 case MSR_MTRRphysBase(2):
3211 case MSR_MTRRphysBase(3):
3212 case MSR_MTRRphysBase(4):
3213 case MSR_MTRRphysBase(5):
3214 case MSR_MTRRphysBase(6):
3215 case MSR_MTRRphysBase(7):
3216 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3218 case MSR_MTRRphysMask(0):
3219 case MSR_MTRRphysMask(1):
3220 case MSR_MTRRphysMask(2):
3221 case MSR_MTRRphysMask(3):
3222 case MSR_MTRRphysMask(4):
3223 case MSR_MTRRphysMask(5):
3224 case MSR_MTRRphysMask(6):
3225 case MSR_MTRRphysMask(7):
3226 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3228 case MSR_MTRRfix64K_00000
:
3229 val
= env
->mtrr_fixed
[0];
3231 case MSR_MTRRfix16K_80000
:
3232 case MSR_MTRRfix16K_A0000
:
3233 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3235 case MSR_MTRRfix4K_C0000
:
3236 case MSR_MTRRfix4K_C8000
:
3237 case MSR_MTRRfix4K_D0000
:
3238 case MSR_MTRRfix4K_D8000
:
3239 case MSR_MTRRfix4K_E0000
:
3240 case MSR_MTRRfix4K_E8000
:
3241 case MSR_MTRRfix4K_F0000
:
3242 case MSR_MTRRfix4K_F8000
:
3243 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3245 case MSR_MTRRdefType
:
3246 val
= env
->mtrr_deftype
;
3249 if (env
->cpuid_features
& CPUID_MTRR
)
3250 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3252 /* XXX: exception ? */
3256 /* XXX: exception ? */
3260 EAX
= (uint32_t)(val
);
3261 EDX
= (uint32_t)(val
>> 32);
3265 target_ulong
helper_lsl(target_ulong selector1
)
3268 uint32_t e1
, e2
, eflags
, selector
;
3269 int rpl
, dpl
, cpl
, type
;
3271 selector
= selector1
& 0xffff;
3272 eflags
= helper_cc_compute_all(CC_OP
);
3273 if ((selector
& 0xfffc) == 0)
3275 if (load_segment(&e1
, &e2
, selector
) != 0)
3278 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3279 cpl
= env
->hflags
& HF_CPL_MASK
;
3280 if (e2
& DESC_S_MASK
) {
3281 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3284 if (dpl
< cpl
|| dpl
< rpl
)
3288 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3299 if (dpl
< cpl
|| dpl
< rpl
) {
3301 CC_SRC
= eflags
& ~CC_Z
;
3305 limit
= get_seg_limit(e1
, e2
);
3306 CC_SRC
= eflags
| CC_Z
;
3310 target_ulong
helper_lar(target_ulong selector1
)
3312 uint32_t e1
, e2
, eflags
, selector
;
3313 int rpl
, dpl
, cpl
, type
;
3315 selector
= selector1
& 0xffff;
3316 eflags
= helper_cc_compute_all(CC_OP
);
3317 if ((selector
& 0xfffc) == 0)
3319 if (load_segment(&e1
, &e2
, selector
) != 0)
3322 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3323 cpl
= env
->hflags
& HF_CPL_MASK
;
3324 if (e2
& DESC_S_MASK
) {
3325 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3328 if (dpl
< cpl
|| dpl
< rpl
)
3332 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3346 if (dpl
< cpl
|| dpl
< rpl
) {
3348 CC_SRC
= eflags
& ~CC_Z
;
3352 CC_SRC
= eflags
| CC_Z
;
3353 return e2
& 0x00f0ff00;
3356 void helper_verr(target_ulong selector1
)
3358 uint32_t e1
, e2
, eflags
, selector
;
3361 selector
= selector1
& 0xffff;
3362 eflags
= helper_cc_compute_all(CC_OP
);
3363 if ((selector
& 0xfffc) == 0)
3365 if (load_segment(&e1
, &e2
, selector
) != 0)
3367 if (!(e2
& DESC_S_MASK
))
3370 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3371 cpl
= env
->hflags
& HF_CPL_MASK
;
3372 if (e2
& DESC_CS_MASK
) {
3373 if (!(e2
& DESC_R_MASK
))
3375 if (!(e2
& DESC_C_MASK
)) {
3376 if (dpl
< cpl
|| dpl
< rpl
)
3380 if (dpl
< cpl
|| dpl
< rpl
) {
3382 CC_SRC
= eflags
& ~CC_Z
;
3386 CC_SRC
= eflags
| CC_Z
;
3389 void helper_verw(target_ulong selector1
)
3391 uint32_t e1
, e2
, eflags
, selector
;
3394 selector
= selector1
& 0xffff;
3395 eflags
= helper_cc_compute_all(CC_OP
);
3396 if ((selector
& 0xfffc) == 0)
3398 if (load_segment(&e1
, &e2
, selector
) != 0)
3400 if (!(e2
& DESC_S_MASK
))
3403 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3404 cpl
= env
->hflags
& HF_CPL_MASK
;
3405 if (e2
& DESC_CS_MASK
) {
3408 if (dpl
< cpl
|| dpl
< rpl
)
3410 if (!(e2
& DESC_W_MASK
)) {
3412 CC_SRC
= eflags
& ~CC_Z
;
3416 CC_SRC
= eflags
| CC_Z
;
3419 /* x87 FPU helpers */
3421 static void fpu_set_exception(int mask
)
3424 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3425 env
->fpus
|= FPUS_SE
| FPUS_B
;
3428 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3431 fpu_set_exception(FPUS_ZE
);
3435 static void fpu_raise_exception(void)
3437 if (env
->cr
[0] & CR0_NE_MASK
) {
3438 raise_exception(EXCP10_COPR
);
3440 #if !defined(CONFIG_USER_ONLY)
3447 void helper_flds_FT0(uint32_t val
)
3454 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3457 void helper_fldl_FT0(uint64_t val
)
3464 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3467 void helper_fildl_FT0(int32_t val
)
3469 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3472 void helper_flds_ST0(uint32_t val
)
3479 new_fpstt
= (env
->fpstt
- 1) & 7;
3481 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3482 env
->fpstt
= new_fpstt
;
3483 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3486 void helper_fldl_ST0(uint64_t val
)
3493 new_fpstt
= (env
->fpstt
- 1) & 7;
3495 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3496 env
->fpstt
= new_fpstt
;
3497 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3500 void helper_fildl_ST0(int32_t val
)
3503 new_fpstt
= (env
->fpstt
- 1) & 7;
3504 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3505 env
->fpstt
= new_fpstt
;
3506 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3509 void helper_fildll_ST0(int64_t val
)
3512 new_fpstt
= (env
->fpstt
- 1) & 7;
3513 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3514 env
->fpstt
= new_fpstt
;
3515 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3518 uint32_t helper_fsts_ST0(void)
3524 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3528 uint64_t helper_fstl_ST0(void)
3534 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3538 int32_t helper_fist_ST0(void)
3541 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3542 if (val
!= (int16_t)val
)
3547 int32_t helper_fistl_ST0(void)
3550 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3554 int64_t helper_fistll_ST0(void)
3557 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3561 int32_t helper_fistt_ST0(void)
3564 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3565 if (val
!= (int16_t)val
)
3570 int32_t helper_fisttl_ST0(void)
3573 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3577 int64_t helper_fisttll_ST0(void)
3580 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3584 void helper_fldt_ST0(target_ulong ptr
)
3587 new_fpstt
= (env
->fpstt
- 1) & 7;
3588 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3589 env
->fpstt
= new_fpstt
;
3590 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3593 void helper_fstt_ST0(target_ulong ptr
)
3595 helper_fstt(ST0
, ptr
);
3598 void helper_fpush(void)
3603 void helper_fpop(void)
3608 void helper_fdecstp(void)
3610 env
->fpstt
= (env
->fpstt
- 1) & 7;
3611 env
->fpus
&= (~0x4700);
3614 void helper_fincstp(void)
3616 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3617 env
->fpus
&= (~0x4700);
3622 void helper_ffree_STN(int st_index
)
3624 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3627 void helper_fmov_ST0_FT0(void)
3632 void helper_fmov_FT0_STN(int st_index
)
3637 void helper_fmov_ST0_STN(int st_index
)
3642 void helper_fmov_STN_ST0(int st_index
)
3647 void helper_fxchg_ST0_STN(int st_index
)
3655 /* FPU operations */
3657 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3659 void helper_fcom_ST0_FT0(void)
3663 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3664 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3667 void helper_fucom_ST0_FT0(void)
3671 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3672 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3675 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3677 void helper_fcomi_ST0_FT0(void)
3682 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3683 eflags
= helper_cc_compute_all(CC_OP
);
3684 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3688 void helper_fucomi_ST0_FT0(void)
3693 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3694 eflags
= helper_cc_compute_all(CC_OP
);
3695 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3699 void helper_fadd_ST0_FT0(void)
3704 void helper_fmul_ST0_FT0(void)
3709 void helper_fsub_ST0_FT0(void)
3714 void helper_fsubr_ST0_FT0(void)
3719 void helper_fdiv_ST0_FT0(void)
3721 ST0
= helper_fdiv(ST0
, FT0
);
3724 void helper_fdivr_ST0_FT0(void)
3726 ST0
= helper_fdiv(FT0
, ST0
);
3729 /* fp operations between STN and ST0 */
3731 void helper_fadd_STN_ST0(int st_index
)
3733 ST(st_index
) += ST0
;
3736 void helper_fmul_STN_ST0(int st_index
)
3738 ST(st_index
) *= ST0
;
3741 void helper_fsub_STN_ST0(int st_index
)
3743 ST(st_index
) -= ST0
;
3746 void helper_fsubr_STN_ST0(int st_index
)
3753 void helper_fdiv_STN_ST0(int st_index
)
3757 *p
= helper_fdiv(*p
, ST0
);
3760 void helper_fdivr_STN_ST0(int st_index
)
3764 *p
= helper_fdiv(ST0
, *p
);
3767 /* misc FPU operations */
3768 void helper_fchs_ST0(void)
3770 ST0
= floatx_chs(ST0
);
3773 void helper_fabs_ST0(void)
3775 ST0
= floatx_abs(ST0
);
3778 void helper_fld1_ST0(void)
3783 void helper_fldl2t_ST0(void)
3788 void helper_fldl2e_ST0(void)
3793 void helper_fldpi_ST0(void)
3798 void helper_fldlg2_ST0(void)
3803 void helper_fldln2_ST0(void)
3808 void helper_fldz_ST0(void)
3813 void helper_fldz_FT0(void)
3818 uint32_t helper_fnstsw(void)
3820 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3823 uint32_t helper_fnstcw(void)
3828 static void update_fp_status(void)
3832 /* set rounding mode */
3833 switch(env
->fpuc
& RC_MASK
) {
3836 rnd_type
= float_round_nearest_even
;
3839 rnd_type
= float_round_down
;
3842 rnd_type
= float_round_up
;
3845 rnd_type
= float_round_to_zero
;
3848 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3850 switch((env
->fpuc
>> 8) & 3) {
3862 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3866 void helper_fldcw(uint32_t val
)
3872 void helper_fclex(void)
3874 env
->fpus
&= 0x7f00;
3877 void helper_fwait(void)
3879 if (env
->fpus
& FPUS_SE
)
3880 fpu_raise_exception();
3883 void helper_fninit(void)
3900 void helper_fbld_ST0(target_ulong ptr
)
3908 for(i
= 8; i
>= 0; i
--) {
3910 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3913 if (ldub(ptr
+ 9) & 0x80)
3919 void helper_fbst_ST0(target_ulong ptr
)
3922 target_ulong mem_ref
, mem_end
;
3925 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3927 mem_end
= mem_ref
+ 9;
3934 while (mem_ref
< mem_end
) {
3939 v
= ((v
/ 10) << 4) | (v
% 10);
3942 while (mem_ref
< mem_end
) {
3947 void helper_f2xm1(void)
3949 ST0
= pow(2.0,ST0
) - 1.0;
3952 void helper_fyl2x(void)
3954 CPU86_LDouble fptemp
;
3958 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3962 env
->fpus
&= (~0x4700);
3967 void helper_fptan(void)
3969 CPU86_LDouble fptemp
;
3972 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3978 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3979 /* the above code is for |arg| < 2**52 only */
3983 void helper_fpatan(void)
3985 CPU86_LDouble fptemp
, fpsrcop
;
3989 ST1
= atan2(fpsrcop
,fptemp
);
3993 void helper_fxtract(void)
3995 CPU86_LDoubleU temp
;
3996 unsigned int expdif
;
3999 expdif
= EXPD(temp
) - EXPBIAS
;
4000 /*DP exponent bias*/
4007 void helper_fprem1(void)
4009 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4010 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4012 signed long long int q
;
4014 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4015 ST0
= 0.0 / 0.0; /* NaN */
4016 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4022 fpsrcop1
.d
= fpsrcop
;
4024 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4027 /* optimisation? taken from the AMD docs */
4028 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4029 /* ST0 is unchanged */
4034 dblq
= fpsrcop
/ fptemp
;
4035 /* round dblq towards nearest integer */
4037 ST0
= fpsrcop
- fptemp
* dblq
;
4039 /* convert dblq to q by truncating towards zero */
4041 q
= (signed long long int)(-dblq
);
4043 q
= (signed long long int)dblq
;
4045 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4046 /* (C0,C3,C1) <-- (q2,q1,q0) */
4047 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4048 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4049 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4051 env
->fpus
|= 0x400; /* C2 <-- 1 */
4052 fptemp
= pow(2.0, expdif
- 50);
4053 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4054 /* fpsrcop = integer obtained by chopping */
4055 fpsrcop
= (fpsrcop
< 0.0) ?
4056 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4057 ST0
-= (ST1
* fpsrcop
* fptemp
);
4061 void helper_fprem(void)
4063 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4064 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4066 signed long long int q
;
4068 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4069 ST0
= 0.0 / 0.0; /* NaN */
4070 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4074 fpsrcop
= (CPU86_LDouble
)ST0
;
4075 fptemp
= (CPU86_LDouble
)ST1
;
4076 fpsrcop1
.d
= fpsrcop
;
4078 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4081 /* optimisation? taken from the AMD docs */
4082 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4083 /* ST0 is unchanged */
4087 if ( expdif
< 53 ) {
4088 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4089 /* round dblq towards zero */
4090 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4091 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4093 /* convert dblq to q by truncating towards zero */
4095 q
= (signed long long int)(-dblq
);
4097 q
= (signed long long int)dblq
;
4099 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4100 /* (C0,C3,C1) <-- (q2,q1,q0) */
4101 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4102 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4103 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4105 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4106 env
->fpus
|= 0x400; /* C2 <-- 1 */
4107 fptemp
= pow(2.0, (double)(expdif
- N
));
4108 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4109 /* fpsrcop = integer obtained by chopping */
4110 fpsrcop
= (fpsrcop
< 0.0) ?
4111 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4112 ST0
-= (ST1
* fpsrcop
* fptemp
);
4116 void helper_fyl2xp1(void)
4118 CPU86_LDouble fptemp
;
4121 if ((fptemp
+1.0)>0.0) {
4122 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4126 env
->fpus
&= (~0x4700);
4131 void helper_fsqrt(void)
4133 CPU86_LDouble fptemp
;
4137 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4143 void helper_fsincos(void)
4145 CPU86_LDouble fptemp
;
4148 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4154 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4155 /* the above code is for |arg| < 2**63 only */
4159 void helper_frndint(void)
4161 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4164 void helper_fscale(void)
4166 ST0
= ldexp (ST0
, (int)(ST1
));
4169 void helper_fsin(void)
4171 CPU86_LDouble fptemp
;
4174 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4178 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4179 /* the above code is for |arg| < 2**53 only */
4183 void helper_fcos(void)
4185 CPU86_LDouble fptemp
;
4188 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4192 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4193 /* the above code is for |arg5 < 2**63 only */
4197 void helper_fxam_ST0(void)
4199 CPU86_LDoubleU temp
;
4204 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4206 env
->fpus
|= 0x200; /* C1 <-- 1 */
4208 /* XXX: test fptags too */
4209 expdif
= EXPD(temp
);
4210 if (expdif
== MAXEXPD
) {
4211 #ifdef USE_X86LDOUBLE
4212 if (MANTD(temp
) == 0x8000000000000000ULL
)
4214 if (MANTD(temp
) == 0)
4216 env
->fpus
|= 0x500 /*Infinity*/;
4218 env
->fpus
|= 0x100 /*NaN*/;
4219 } else if (expdif
== 0) {
4220 if (MANTD(temp
) == 0)
4221 env
->fpus
|= 0x4000 /*Zero*/;
4223 env
->fpus
|= 0x4400 /*Denormal*/;
4229 void helper_fstenv(target_ulong ptr
, int data32
)
4231 int fpus
, fptag
, exp
, i
;
4235 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4237 for (i
=7; i
>=0; i
--) {
4239 if (env
->fptags
[i
]) {
4242 tmp
.d
= env
->fpregs
[i
].d
;
4245 if (exp
== 0 && mant
== 0) {
4248 } else if (exp
== 0 || exp
== MAXEXPD
4249 #ifdef USE_X86LDOUBLE
4250 || (mant
& (1LL << 63)) == 0
4253 /* NaNs, infinity, denormal */
4260 stl(ptr
, env
->fpuc
);
4262 stl(ptr
+ 8, fptag
);
4263 stl(ptr
+ 12, 0); /* fpip */
4264 stl(ptr
+ 16, 0); /* fpcs */
4265 stl(ptr
+ 20, 0); /* fpoo */
4266 stl(ptr
+ 24, 0); /* fpos */
4269 stw(ptr
, env
->fpuc
);
4271 stw(ptr
+ 4, fptag
);
4279 void helper_fldenv(target_ulong ptr
, int data32
)
4284 env
->fpuc
= lduw(ptr
);
4285 fpus
= lduw(ptr
+ 4);
4286 fptag
= lduw(ptr
+ 8);
4289 env
->fpuc
= lduw(ptr
);
4290 fpus
= lduw(ptr
+ 2);
4291 fptag
= lduw(ptr
+ 4);
4293 env
->fpstt
= (fpus
>> 11) & 7;
4294 env
->fpus
= fpus
& ~0x3800;
4295 for(i
= 0;i
< 8; i
++) {
4296 env
->fptags
[i
] = ((fptag
& 3) == 3);
4301 void helper_fsave(target_ulong ptr
, int data32
)
4306 helper_fstenv(ptr
, data32
);
4308 ptr
+= (14 << data32
);
4309 for(i
= 0;i
< 8; i
++) {
4311 helper_fstt(tmp
, ptr
);
4329 void helper_frstor(target_ulong ptr
, int data32
)
4334 helper_fldenv(ptr
, data32
);
4335 ptr
+= (14 << data32
);
4337 for(i
= 0;i
< 8; i
++) {
4338 tmp
= helper_fldt(ptr
);
4344 void helper_fxsave(target_ulong ptr
, int data64
)
4346 int fpus
, fptag
, i
, nb_xmm_regs
;
4350 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4352 for(i
= 0; i
< 8; i
++) {
4353 fptag
|= (env
->fptags
[i
] << i
);
4355 stw(ptr
, env
->fpuc
);
4357 stw(ptr
+ 4, fptag
^ 0xff);
4358 #ifdef TARGET_X86_64
4360 stq(ptr
+ 0x08, 0); /* rip */
4361 stq(ptr
+ 0x10, 0); /* rdp */
4365 stl(ptr
+ 0x08, 0); /* eip */
4366 stl(ptr
+ 0x0c, 0); /* sel */
4367 stl(ptr
+ 0x10, 0); /* dp */
4368 stl(ptr
+ 0x14, 0); /* sel */
4372 for(i
= 0;i
< 8; i
++) {
4374 helper_fstt(tmp
, addr
);
4378 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4379 /* XXX: finish it */
4380 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4381 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4382 if (env
->hflags
& HF_CS64_MASK
)
4387 /* Fast FXSAVE leaves out the XMM registers */
4388 if (!(env
->efer
& MSR_EFER_FFXSR
)
4389 || (env
->hflags
& HF_CPL_MASK
)
4390 || !(env
->hflags
& HF_LMA_MASK
)) {
4391 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4392 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4393 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4400 void helper_fxrstor(target_ulong ptr
, int data64
)
4402 int i
, fpus
, fptag
, nb_xmm_regs
;
4406 env
->fpuc
= lduw(ptr
);
4407 fpus
= lduw(ptr
+ 2);
4408 fptag
= lduw(ptr
+ 4);
4409 env
->fpstt
= (fpus
>> 11) & 7;
4410 env
->fpus
= fpus
& ~0x3800;
4412 for(i
= 0;i
< 8; i
++) {
4413 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4417 for(i
= 0;i
< 8; i
++) {
4418 tmp
= helper_fldt(addr
);
4423 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4424 /* XXX: finish it */
4425 env
->mxcsr
= ldl(ptr
+ 0x18);
4427 if (env
->hflags
& HF_CS64_MASK
)
4432 /* Fast FXRESTORE leaves out the XMM registers */
4433 if (!(env
->efer
& MSR_EFER_FFXSR
)
4434 || (env
->hflags
& HF_CPL_MASK
)
4435 || !(env
->hflags
& HF_LMA_MASK
)) {
4436 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4437 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4438 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4445 #ifndef USE_X86LDOUBLE
4447 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4449 CPU86_LDoubleU temp
;
4454 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4455 /* exponent + sign */
4456 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4457 e
|= SIGND(temp
) >> 16;
4461 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4463 CPU86_LDoubleU temp
;
4467 /* XXX: handle overflow ? */
4468 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4469 e
|= (upper
>> 4) & 0x800; /* sign */
4470 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4472 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4475 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4482 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4484 CPU86_LDoubleU temp
;
4487 *pmant
= temp
.l
.lower
;
4488 *pexp
= temp
.l
.upper
;
4491 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4493 CPU86_LDoubleU temp
;
4495 temp
.l
.upper
= upper
;
4496 temp
.l
.lower
= mant
;
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

#endif /* TARGET_X86_64 */
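/*
 * Editor's note: a minimal usage sketch for div64() above (not called by
 * the original code, kept under #if 0).  Dividing the 128-bit value 2^64
 * (phigh = 1, plow = 0) by 3 leaves the 64-bit quotient in *plow and the
 * remainder in *phigh.
 */
#if 0
static void div64_example(void)
{
    uint64_t lo = 0, hi = 1;            /* 128-bit dividend: 2^64 */
    int overflow = div64(&lo, &hi, 3);  /* quotient must fit in 64 bits */
    /* now: overflow == 0, lo == 0x5555555555555555, hi == 1 */
}
#endif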
4645 static void do_hlt(void)
4647 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4649 env
->exception_index
= EXCP_HLT
;
4653 void helper_hlt(int next_eip_addend
)
4655 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4656 EIP
+= next_eip_addend
;
4661 void helper_monitor(target_ulong ptr
)
4663 if ((uint32_t)ECX
!= 0)
4664 raise_exception(EXCP0D_GPF
);
4665 /* XXX: store address ? */
4666 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4669 void helper_mwait(int next_eip_addend
)
4671 if ((uint32_t)ECX
!= 0)
4672 raise_exception(EXCP0D_GPF
);
4673 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4674 EIP
+= next_eip_addend
;
4676 /* XXX: not complete but not completely erroneous */
4677 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4678 /* more than one CPU: do not sleep because another CPU may
4685 void helper_debug(void)
4687 env
->exception_index
= EXCP_DEBUG
;
4691 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4693 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4696 void helper_raise_exception(int exception_index
)
4698 raise_exception(exception_index
);
4701 void helper_cli(void)
4703 env
->eflags
&= ~IF_MASK
;
4706 void helper_sti(void)
4708 env
->eflags
|= IF_MASK
;
4712 /* vm86plus instructions */
4713 void helper_cli_vm(void)
4715 env
->eflags
&= ~VIF_MASK
;
4718 void helper_sti_vm(void)
4720 env
->eflags
|= VIF_MASK
;
4721 if (env
->eflags
& VIP_MASK
) {
4722 raise_exception(EXCP0D_GPF
);
4727 void helper_set_inhibit_irq(void)
4729 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4732 void helper_reset_inhibit_irq(void)
4734 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4737 void helper_boundw(target_ulong a0
, int v
)
4741 high
= ldsw(a0
+ 2);
4743 if (v
< low
|| v
> high
) {
4744 raise_exception(EXCP05_BOUND
);
4748 void helper_boundl(target_ulong a0
, int v
)
4753 if (v
< low
|| v
> high
) {
4754 raise_exception(EXCP05_BOUND
);
4758 static float approx_rsqrt(float a
)
4760 return 1.0 / sqrt(a
);
4763 static float approx_rcp(float a
)
4768 #if !defined(CONFIG_USER_ONLY)
4770 #define MMUSUFFIX _mmu
4773 #include "softmmu_template.h"
4776 #include "softmmu_template.h"
4779 #include "softmmu_template.h"
4782 #include "softmmu_template.h"
4786 #if !defined(CONFIG_USER_ONLY)
4787 /* try to fill the TLB and return an exception if error. If retaddr is
4788 NULL, it means that the function was called in C code (i.e. not
4789 from generated code or from helper.c) */
4790 /* XXX: fix it to restore all registers */
4791 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4793 TranslationBlock
*tb
;
4796 CPUX86State
*saved_env
;
4798 /* XXX: hack to restore env in all cases, even if not called from
4801 env
= cpu_single_env
;
4803 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4806 /* now we have a real cpu fault */
4807 pc
= (unsigned long)retaddr
;
4808 tb
= tb_find_pc(pc
);
4810 /* the PC is inside the translated code. It means that we have
4811 a virtual CPU fault */
4812 cpu_restore_state(tb
, env
, pc
, NULL
);
4815 raise_exception_err(env
->exception_index
, env
->error_code
);
4821 /* Secure Virtual Machine helpers */
4823 #if defined(CONFIG_USER_ONLY)
4825 void helper_vmrun(int aflag
, int next_eip_addend
)
4828 void helper_vmmcall(void)
4831 void helper_vmload(int aflag
)
4834 void helper_vmsave(int aflag
)
4837 void helper_stgi(void)
4840 void helper_clgi(void)
4843 void helper_skinit(void)
4846 void helper_invlpga(int aflag
)
4849 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4852 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4856 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4857 uint32_t next_eip_addend
)
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
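/*
 * Editor's note: worked example of the attrib packing above (illustrative
 * only).  A flat 32-bit code segment with flags == 0x00c09b00 is stored as
 *   attrib = ((0x00c09b00 >> 8) & 0xff) | ((0x00c09b00 >> 12) & 0x0f00)
 *          = 0x9b | 0xc00 = 0xc9b
 * and svm_load_seg() recovers the original value:
 *   ((0xc9b & 0xff) << 8) | ((0xc9b & 0x0f00) << 12) = 0x9b00 | 0xc00000
 *          = 0x00c09b00
 */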
4895 void helper_vmrun(int aflag
, int next_eip_addend
)
4901 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4906 addr
= (uint32_t)EAX
;
4908 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4910 env
->vm_vmcb
= addr
;
4912 /* save the current CPU state in the hsave page */
4913 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4914 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4916 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4917 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4919 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4920 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4921 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4922 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4923 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4924 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4926 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4927 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4929 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4931 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4933 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4935 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4938 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4939 EIP
+ next_eip_addend
);
4940 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4941 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4943 /* load the interception bitmaps so we do not need to access the
4945 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4946 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4947 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4948 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4949 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4950 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4952 /* enable intercepts */
4953 env
->hflags
|= HF_SVMI_MASK
;
4955 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4957 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4958 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4960 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4961 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4963 /* clear exit_info_2 so we behave like the real hardware */
4964 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4966 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4967 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4968 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4969 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4970 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4971 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4972 if (int_ctl
& V_INTR_MASKING_MASK
) {
4973 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4974 env
->hflags2
|= HF2_VINTR_MASK
;
4975 if (env
->eflags
& IF_MASK
)
4976 env
->hflags2
|= HF2_HIF_MASK
;
4980 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4982 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4983 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4984 CC_OP
= CC_OP_EFLAGS
;
4986 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4988 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4990 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4992 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4995 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4997 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4998 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4999 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
5000 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
5001 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
5003 /* FIXME: guest state consistency checks */
5005 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
5006 case TLB_CONTROL_DO_NOTHING
:
5008 case TLB_CONTROL_FLUSH_ALL_ASID
:
5009 /* FIXME: this is not 100% correct but should work for now */
5014 env
->hflags2
|= HF2_GIF_MASK
;
5016 if (int_ctl
& V_IRQ_MASK
) {
5017 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
5020 /* maybe we need to inject an event */
5021 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
5022 if (event_inj
& SVM_EVTINJ_VALID
) {
5023 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
5024 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
5025 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
5027 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
5028 /* FIXME: need to implement valid_err */
5029 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5030 case SVM_EVTINJ_TYPE_INTR
:
5031 env
->exception_index
= vector
;
5032 env
->error_code
= event_inj_err
;
5033 env
->exception_is_int
= 0;
5034 env
->exception_next_eip
= -1;
5035 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5036 /* XXX: is it always correct ? */
5037 do_interrupt(vector
, 0, 0, 0, 1);
5039 case SVM_EVTINJ_TYPE_NMI
:
5040 env
->exception_index
= EXCP02_NMI
;
5041 env
->error_code
= event_inj_err
;
5042 env
->exception_is_int
= 0;
5043 env
->exception_next_eip
= EIP
;
5044 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5047 case SVM_EVTINJ_TYPE_EXEPT
:
5048 env
->exception_index
= vector
;
5049 env
->error_code
= event_inj_err
;
5050 env
->exception_is_int
= 0;
5051 env
->exception_next_eip
= -1;
5052 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5055 case SVM_EVTINJ_TYPE_SOFT
:
5056 env
->exception_index
= vector
;
5057 env
->error_code
= event_inj_err
;
5058 env
->exception_is_int
= 1;
5059 env
->exception_next_eip
= EIP
;
5060 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5064 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5068 void helper_vmmcall(void)
5070 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5071 raise_exception(EXCP06_ILLOP
);
5074 void helper_vmload(int aflag
)
5077 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
5082 addr
= (uint32_t)EAX
;
5084 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5085 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5086 env
->segs
[R_FS
].base
);
5088 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
5090 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
5092 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5094 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5097 #ifdef TARGET_X86_64
5098 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5099 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5100 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5101 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5103 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5104 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5105 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5106 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
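
/* Note: VMLOAD/VMSAVE only transfer the "extra" guest state that
   VMRUN/#VMEXIT do not switch themselves: FS, GS, TR and LDTR
   (including hidden parts), KernelGSBase, the STAR/LSTAR/CSTAR/SFMASK
   syscall MSRs and the SYSENTER MSRs.  A hypervisor would typically
   bracket VMRUN with a VMSAVE/VMLOAD pair so that the host copies of
   this state can be restored afterwards (illustrative usage, not
   something these helpers enforce). */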
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
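
/* Illustrative worked example for the MSR permission bitmap above:
   assuming a RDMSR of MSR_LSTAR (ECX = 0xc0000082, param = 0 for reads,
   1 for writes), t0 = (8192 + 0x82) * 2 = 16644 bits, which splits into
   byte t1 = 16644 / 8 = 2080 and bit t0 = 16644 % 8 = 4, so the test
   (1 << param) << t0 checks bit 4 (read intercept) or bit 5 (write
   intercept) of byte 2080 of the MSRPM. */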
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
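
/* Illustrative worked example for the I/O permission bitmap above: the
   size field of the IOIO exit information lives in bits 4-6 of param, so
   for an assumed 16-bit access ((param >> 4) & 7) == 2 and mask == 0x3
   (one permission bit per port byte).  With port == 0x71 the code tests
   bits (0x71 & 7) = 1 and 2 of the 16-bit word at iopm_base_pa +
   0x71 / 8 = +14; if either bit is set, helper_vmexit() reports
   SVM_EXIT_IOIO with the port number placed in bits 16-31 of
   exit_info_1. */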
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave +
                                     offsetof(struct vmcb, save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave +
                                     offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave +
                                     offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
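
/* Illustrative note: after this point control goes back to the host side
   of VMRUN, and a hypothetical hypervisor that keeps its VMCB mapped as
   "struct vmcb *vmcb" would read the outcome from the fields stored
   above, e.g.:

       uint32_t code  = vmcb->control.exit_code;    // e.g. SVM_EXIT_IOIO
       uint64_t info1 = vmcb->control.exit_info_1;  // access details
       uint64_t info2 = vmcb->control.exit_info_2;  // e.g. next rIP for IOIO
*/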
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
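
/* Note: fptags[] keeps one byte per x87/MMX register (0 = valid,
   1 = empty), so the two 32-bit stores in each helper above are a
   compact equivalent of the loop

       for (i = 0; i < 8; i++)
           env->fptags[i] = 0;   // helper_enter_mmx: mark all registers valid
                                 // (helper_emms stores 1 instead: all empty)
*/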
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
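
/* Note: ops_sse.h and helper_template.h are multiple-inclusion templates
   parameterised by SHIFT: helper_template.h generates the byte, word,
   long and quad flag/shift helpers from SHIFT = 0, 1, 2, 3
   (DATA_BITS = 8 << SHIFT), while ops_sse.h is included once with
   SHIFT = 0 for the MMX variants and once with SHIFT = 1 for the SSE
   variants of the vector helpers. */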
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
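
/* Illustrative example: helper_bsf(0x48) returns 3 (lowest set bit) and
   helper_bsr(0x48) returns 6 (highest set bit).  Both loops assume a
   non-zero argument (for t0 == 0 they would not terminate), so the
   translated code is expected to test for zero before calling them;
   BSF/BSR only set ZF and leave the destination undefined in that
   case. */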
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
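
/* Note: this is the lazy EFLAGS scheme: the translator only records the
   kind of the last flag-setting operation in CC_OP and its operands in
   CC_SRC/CC_DST, and the flags are rematerialised on demand.
   Illustrative example: after an 8-bit ADD the state might be
   CC_OP = CC_OP_ADDB, CC_DST = result, CC_SRC = added operand, and
   compute_all_addb() rebuilds CF/PF/AF/ZF/SF/OF from those two values
   alone. */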
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();