4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #define CPU_NO_GLOBAL_REGS
22 #include "host-utils.h"
28 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
29 # define LOG_PCALL_STATE(env) \
30 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
32 # define LOG_PCALL(...) do { } while (0)
33 # define LOG_PCALL_STATE(env) do { } while (0)
38 #define raise_exception_err(a, b)\
40 qemu_log("raise_exception line=%d\n", __LINE__);\
41 (raise_exception_err)(a, b);\
45 static const uint8_t parity_table
[256] = {
46 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
47 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
48 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
49 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
50 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
51 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
52 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
53 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
54 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
55 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
58 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
59 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
60 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
61 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
62 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
63 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
64 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
65 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
66 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
67 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
68 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
69 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
70 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
71 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
72 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
73 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
74 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
75 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
76 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
77 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
81 static const uint8_t rclw_table
[32] = {
82 0, 1, 2, 3, 4, 5, 6, 7,
83 8, 9,10,11,12,13,14,15,
84 16, 0, 1, 2, 3, 4, 5, 6,
85 7, 8, 9,10,11,12,13,14,
89 static const uint8_t rclb_table
[32] = {
90 0, 1, 2, 3, 4, 5, 6, 7,
91 8, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 0, 1, 2, 3, 4, 5,
93 6, 7, 8, 0, 1, 2, 3, 4,
96 static const CPU86_LDouble f15rk
[7] =
98 0.00000000000000000000L,
99 1.00000000000000000000L,
100 3.14159265358979323851L, /*pi*/
101 0.30102999566398119523L, /*lg2*/
102 0.69314718055994530943L, /*ln2*/
103 1.44269504088896340739L, /*l2e*/
104 3.32192809488736234781L, /*l2t*/
107 /* broken thread support */
109 static spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
111 void helper_lock(void)
113 spin_lock(&global_cpu_lock
);
116 void helper_unlock(void)
118 spin_unlock(&global_cpu_lock
);
121 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
123 load_eflags(t0
, update_mask
);
126 target_ulong
helper_read_eflags(void)
129 eflags
= helper_cc_compute_all(CC_OP
);
130 eflags
|= (DF
& DF_MASK
);
131 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
135 /* return non zero if error */
136 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
147 index
= selector
& ~7;
148 if ((index
+ 7) > dt
->limit
)
150 ptr
= dt
->base
+ index
;
151 *e1_ptr
= ldl_kernel(ptr
);
152 *e2_ptr
= ldl_kernel(ptr
+ 4);
156 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
159 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
160 if (e2
& DESC_G_MASK
)
161 limit
= (limit
<< 12) | 0xfff;
165 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
167 return ((e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000));
170 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
172 sc
->base
= get_seg_base(e1
, e2
);
173 sc
->limit
= get_seg_limit(e1
, e2
);
177 /* init the segment cache in vm86 mode. */
178 static inline void load_seg_vm(int seg
, int selector
)
181 cpu_x86_load_seg_cache(env
, seg
, selector
,
182 (selector
<< 4), 0xffff, 0);
185 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
186 uint32_t *esp_ptr
, int dpl
)
188 int type
, index
, shift
;
193 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
194 for(i
=0;i
<env
->tr
.limit
;i
++) {
195 printf("%02x ", env
->tr
.base
[i
]);
196 if ((i
& 7) == 7) printf("\n");
202 if (!(env
->tr
.flags
& DESC_P_MASK
))
203 cpu_abort(env
, "invalid tss");
204 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
206 cpu_abort(env
, "invalid tss type");
208 index
= (dpl
* 4 + 2) << shift
;
209 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
210 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
212 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
213 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
215 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
216 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
220 /* XXX: merge with load_seg() */
221 static void tss_load_seg(int seg_reg
, int selector
)
226 if ((selector
& 0xfffc) != 0) {
227 if (load_segment(&e1
, &e2
, selector
) != 0)
228 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
229 if (!(e2
& DESC_S_MASK
))
230 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
232 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
233 cpl
= env
->hflags
& HF_CPL_MASK
;
234 if (seg_reg
== R_CS
) {
235 if (!(e2
& DESC_CS_MASK
))
236 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
237 /* XXX: is it correct ? */
239 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
240 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
241 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
242 } else if (seg_reg
== R_SS
) {
243 /* SS must be writable data */
244 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
245 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
246 if (dpl
!= cpl
|| dpl
!= rpl
)
247 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
249 /* not readable code */
250 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
251 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
252 /* if data or non conforming code, checks the rights */
253 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
254 if (dpl
< cpl
|| dpl
< rpl
)
255 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
258 if (!(e2
& DESC_P_MASK
))
259 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
260 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
261 get_seg_base(e1
, e2
),
262 get_seg_limit(e1
, e2
),
265 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
266 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
270 #define SWITCH_TSS_JMP 0
271 #define SWITCH_TSS_IRET 1
272 #define SWITCH_TSS_CALL 2
274 /* XXX: restore CPU state in registers (PowerPC case) */
275 static void switch_tss(int tss_selector
,
276 uint32_t e1
, uint32_t e2
, int source
,
279 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
280 target_ulong tss_base
;
281 uint32_t new_regs
[8], new_segs
[6];
282 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
283 uint32_t old_eflags
, eflags_mask
;
288 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
289 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
291 /* if task gate, we read the TSS segment and we load it */
293 if (!(e2
& DESC_P_MASK
))
294 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
295 tss_selector
= e1
>> 16;
296 if (tss_selector
& 4)
297 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
298 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
299 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
300 if (e2
& DESC_S_MASK
)
301 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
302 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
304 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
307 if (!(e2
& DESC_P_MASK
))
308 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
314 tss_limit
= get_seg_limit(e1
, e2
);
315 tss_base
= get_seg_base(e1
, e2
);
316 if ((tss_selector
& 4) != 0 ||
317 tss_limit
< tss_limit_max
)
318 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
319 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
321 old_tss_limit_max
= 103;
323 old_tss_limit_max
= 43;
325 /* read all the registers from the new TSS */
328 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
329 new_eip
= ldl_kernel(tss_base
+ 0x20);
330 new_eflags
= ldl_kernel(tss_base
+ 0x24);
331 for(i
= 0; i
< 8; i
++)
332 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
333 for(i
= 0; i
< 6; i
++)
334 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
335 new_ldt
= lduw_kernel(tss_base
+ 0x60);
336 new_trap
= ldl_kernel(tss_base
+ 0x64);
340 new_eip
= lduw_kernel(tss_base
+ 0x0e);
341 new_eflags
= lduw_kernel(tss_base
+ 0x10);
342 for(i
= 0; i
< 8; i
++)
343 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
344 for(i
= 0; i
< 4; i
++)
345 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
346 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
352 /* NOTE: we must avoid memory exceptions during the task switch,
353 so we make dummy accesses before */
354 /* XXX: it can still fail in some cases, so a bigger hack is
355 necessary to valid the TLB after having done the accesses */
357 v1
= ldub_kernel(env
->tr
.base
);
358 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
359 stb_kernel(env
->tr
.base
, v1
);
360 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
362 /* clear busy bit (it is restartable) */
363 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
366 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
367 e2
= ldl_kernel(ptr
+ 4);
368 e2
&= ~DESC_TSS_BUSY_MASK
;
369 stl_kernel(ptr
+ 4, e2
);
371 old_eflags
= compute_eflags();
372 if (source
== SWITCH_TSS_IRET
)
373 old_eflags
&= ~NT_MASK
;
375 /* save the current state in the old TSS */
378 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
379 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
380 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
381 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
382 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
383 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
384 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
385 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
386 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
387 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
388 for(i
= 0; i
< 6; i
++)
389 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
392 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
393 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
394 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
395 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
396 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
397 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
398 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
399 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
400 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
401 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
402 for(i
= 0; i
< 4; i
++)
403 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
406 /* now if an exception occurs, it will occurs in the next task
409 if (source
== SWITCH_TSS_CALL
) {
410 stw_kernel(tss_base
, env
->tr
.selector
);
411 new_eflags
|= NT_MASK
;
415 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
418 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
419 e2
= ldl_kernel(ptr
+ 4);
420 e2
|= DESC_TSS_BUSY_MASK
;
421 stl_kernel(ptr
+ 4, e2
);
424 /* set the new CPU state */
425 /* from this point, any exception which occurs can give problems */
426 env
->cr
[0] |= CR0_TS_MASK
;
427 env
->hflags
|= HF_TS_MASK
;
428 env
->tr
.selector
= tss_selector
;
429 env
->tr
.base
= tss_base
;
430 env
->tr
.limit
= tss_limit
;
431 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
433 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
434 cpu_x86_update_cr3(env
, new_cr3
);
437 /* load all registers without an exception, then reload them with
438 possible exception */
440 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
441 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
443 eflags_mask
&= 0xffff;
444 load_eflags(new_eflags
, eflags_mask
);
445 /* XXX: what to do in 16 bit case ? */
454 if (new_eflags
& VM_MASK
) {
455 for(i
= 0; i
< 6; i
++)
456 load_seg_vm(i
, new_segs
[i
]);
457 /* in vm86, CPL is always 3 */
458 cpu_x86_set_cpl(env
, 3);
460 /* CPL is set the RPL of CS */
461 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
462 /* first just selectors as the rest may trigger exceptions */
463 for(i
= 0; i
< 6; i
++)
464 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
467 env
->ldt
.selector
= new_ldt
& ~4;
474 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
476 if ((new_ldt
& 0xfffc) != 0) {
478 index
= new_ldt
& ~7;
479 if ((index
+ 7) > dt
->limit
)
480 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
481 ptr
= dt
->base
+ index
;
482 e1
= ldl_kernel(ptr
);
483 e2
= ldl_kernel(ptr
+ 4);
484 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
485 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
486 if (!(e2
& DESC_P_MASK
))
487 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
488 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
491 /* load the segments */
492 if (!(new_eflags
& VM_MASK
)) {
493 tss_load_seg(R_CS
, new_segs
[R_CS
]);
494 tss_load_seg(R_SS
, new_segs
[R_SS
]);
495 tss_load_seg(R_ES
, new_segs
[R_ES
]);
496 tss_load_seg(R_DS
, new_segs
[R_DS
]);
497 tss_load_seg(R_FS
, new_segs
[R_FS
]);
498 tss_load_seg(R_GS
, new_segs
[R_GS
]);
501 /* check that EIP is in the CS segment limits */
502 if (new_eip
> env
->segs
[R_CS
].limit
) {
503 /* XXX: different exception if CALL ? */
504 raise_exception_err(EXCP0D_GPF
, 0);
507 #ifndef CONFIG_USER_ONLY
508 /* reset local breakpoints */
509 if (env
->dr
[7] & 0x55) {
510 for (i
= 0; i
< 4; i
++) {
511 if (hw_breakpoint_enabled(env
->dr
[7], i
) == 0x1)
512 hw_breakpoint_remove(env
, i
);
519 /* check if Port I/O is allowed in TSS */
520 static inline void check_io(int addr
, int size
)
522 int io_offset
, val
, mask
;
524 /* TSS must be a valid 32 bit one */
525 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
526 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
529 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
530 io_offset
+= (addr
>> 3);
531 /* Note: the check needs two bytes */
532 if ((io_offset
+ 1) > env
->tr
.limit
)
534 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
536 mask
= (1 << size
) - 1;
537 /* all bits must be zero to allow the I/O */
538 if ((val
& mask
) != 0) {
540 raise_exception_err(EXCP0D_GPF
, 0);
544 void helper_check_iob(uint32_t t0
)
549 void helper_check_iow(uint32_t t0
)
554 void helper_check_iol(uint32_t t0
)
559 void helper_outb(uint32_t port
, uint32_t data
)
561 cpu_outb(env
, port
, data
& 0xff);
564 target_ulong
helper_inb(uint32_t port
)
566 return cpu_inb(env
, port
);
569 void helper_outw(uint32_t port
, uint32_t data
)
571 cpu_outw(env
, port
, data
& 0xffff);
574 target_ulong
helper_inw(uint32_t port
)
576 return cpu_inw(env
, port
);
579 void helper_outl(uint32_t port
, uint32_t data
)
581 cpu_outl(env
, port
, data
);
584 target_ulong
helper_inl(uint32_t port
)
586 return cpu_inl(env
, port
);
589 static inline unsigned int get_sp_mask(unsigned int e2
)
591 if (e2
& DESC_B_MASK
)
597 static int exeption_has_error_code(int intno
)
613 #define SET_ESP(val, sp_mask)\
615 if ((sp_mask) == 0xffff)\
616 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
617 else if ((sp_mask) == 0xffffffffLL)\
618 ESP = (uint32_t)(val);\
623 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
626 /* in 64-bit machines, this can overflow. So this segment addition macro
627 * can be used to trim the value to 32-bit whenever needed */
628 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
630 /* XXX: add a is_user flag to have proper security support */
631 #define PUSHW(ssp, sp, sp_mask, val)\
634 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
637 #define PUSHL(ssp, sp, sp_mask, val)\
640 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
643 #define POPW(ssp, sp, sp_mask, val)\
645 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
649 #define POPL(ssp, sp, sp_mask, val)\
651 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
655 /* protected mode interrupt */
656 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
657 unsigned int next_eip
, int is_hw
)
660 target_ulong ptr
, ssp
;
661 int type
, dpl
, selector
, ss_dpl
, cpl
;
662 int has_error_code
, new_stack
, shift
;
663 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
664 uint32_t old_eip
, sp_mask
;
667 if (!is_int
&& !is_hw
)
668 has_error_code
= exeption_has_error_code(intno
);
675 if (intno
* 8 + 7 > dt
->limit
)
676 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
677 ptr
= dt
->base
+ intno
* 8;
678 e1
= ldl_kernel(ptr
);
679 e2
= ldl_kernel(ptr
+ 4);
680 /* check gate type */
681 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
683 case 5: /* task gate */
684 /* must do that check here to return the correct error code */
685 if (!(e2
& DESC_P_MASK
))
686 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
687 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
688 if (has_error_code
) {
691 /* push the error code */
692 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
694 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
698 esp
= (ESP
- (2 << shift
)) & mask
;
699 ssp
= env
->segs
[R_SS
].base
+ esp
;
701 stl_kernel(ssp
, error_code
);
703 stw_kernel(ssp
, error_code
);
707 case 6: /* 286 interrupt gate */
708 case 7: /* 286 trap gate */
709 case 14: /* 386 interrupt gate */
710 case 15: /* 386 trap gate */
713 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
716 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
717 cpl
= env
->hflags
& HF_CPL_MASK
;
718 /* check privilege if software int */
719 if (is_int
&& dpl
< cpl
)
720 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
721 /* check valid bit */
722 if (!(e2
& DESC_P_MASK
))
723 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
725 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
726 if ((selector
& 0xfffc) == 0)
727 raise_exception_err(EXCP0D_GPF
, 0);
729 if (load_segment(&e1
, &e2
, selector
) != 0)
730 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
731 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
732 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
733 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
735 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
736 if (!(e2
& DESC_P_MASK
))
737 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
738 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
739 /* to inner privilege */
740 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
741 if ((ss
& 0xfffc) == 0)
742 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
744 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
745 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
746 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
747 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
749 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
750 if (!(ss_e2
& DESC_S_MASK
) ||
751 (ss_e2
& DESC_CS_MASK
) ||
752 !(ss_e2
& DESC_W_MASK
))
753 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
754 if (!(ss_e2
& DESC_P_MASK
))
755 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
757 sp_mask
= get_sp_mask(ss_e2
);
758 ssp
= get_seg_base(ss_e1
, ss_e2
);
759 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
760 /* to same privilege */
761 if (env
->eflags
& VM_MASK
)
762 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
764 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
765 ssp
= env
->segs
[R_SS
].base
;
769 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
770 new_stack
= 0; /* avoid warning */
771 sp_mask
= 0; /* avoid warning */
772 ssp
= 0; /* avoid warning */
773 esp
= 0; /* avoid warning */
779 /* XXX: check that enough room is available */
780 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
781 if (env
->eflags
& VM_MASK
)
787 if (env
->eflags
& VM_MASK
) {
788 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
789 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
790 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
791 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
793 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
794 PUSHL(ssp
, esp
, sp_mask
, ESP
);
796 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
797 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
798 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
799 if (has_error_code
) {
800 PUSHL(ssp
, esp
, sp_mask
, error_code
);
804 if (env
->eflags
& VM_MASK
) {
805 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
806 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
807 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
808 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
810 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
811 PUSHW(ssp
, esp
, sp_mask
, ESP
);
813 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
814 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
815 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
816 if (has_error_code
) {
817 PUSHW(ssp
, esp
, sp_mask
, error_code
);
822 if (env
->eflags
& VM_MASK
) {
823 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
824 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
825 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
826 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
828 ss
= (ss
& ~3) | dpl
;
829 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
830 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
832 SET_ESP(esp
, sp_mask
);
834 selector
= (selector
& ~3) | dpl
;
835 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
836 get_seg_base(e1
, e2
),
837 get_seg_limit(e1
, e2
),
839 cpu_x86_set_cpl(env
, dpl
);
842 /* interrupt gate clear IF mask */
843 if ((type
& 1) == 0) {
844 env
->eflags
&= ~IF_MASK
;
846 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
851 #define PUSHQ(sp, val)\
854 stq_kernel(sp, (val));\
857 #define POPQ(sp, val)\
859 val = ldq_kernel(sp);\
863 static inline target_ulong
get_rsp_from_tss(int level
)
868 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
869 env
->tr
.base
, env
->tr
.limit
);
872 if (!(env
->tr
.flags
& DESC_P_MASK
))
873 cpu_abort(env
, "invalid tss");
874 index
= 8 * level
+ 4;
875 if ((index
+ 7) > env
->tr
.limit
)
876 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
877 return ldq_kernel(env
->tr
.base
+ index
);
880 /* 64 bit interrupt */
881 static void do_interrupt64(int intno
, int is_int
, int error_code
,
882 target_ulong next_eip
, int is_hw
)
886 int type
, dpl
, selector
, cpl
, ist
;
887 int has_error_code
, new_stack
;
888 uint32_t e1
, e2
, e3
, ss
;
889 target_ulong old_eip
, esp
, offset
;
892 if (!is_int
&& !is_hw
)
893 has_error_code
= exeption_has_error_code(intno
);
900 if (intno
* 16 + 15 > dt
->limit
)
901 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
902 ptr
= dt
->base
+ intno
* 16;
903 e1
= ldl_kernel(ptr
);
904 e2
= ldl_kernel(ptr
+ 4);
905 e3
= ldl_kernel(ptr
+ 8);
906 /* check gate type */
907 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
909 case 14: /* 386 interrupt gate */
910 case 15: /* 386 trap gate */
913 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
916 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
917 cpl
= env
->hflags
& HF_CPL_MASK
;
918 /* check privilege if software int */
919 if (is_int
&& dpl
< cpl
)
920 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
921 /* check valid bit */
922 if (!(e2
& DESC_P_MASK
))
923 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
925 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
927 if ((selector
& 0xfffc) == 0)
928 raise_exception_err(EXCP0D_GPF
, 0);
930 if (load_segment(&e1
, &e2
, selector
) != 0)
931 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
932 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
933 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
934 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
936 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
937 if (!(e2
& DESC_P_MASK
))
938 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
939 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
940 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
941 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
942 /* to inner privilege */
944 esp
= get_rsp_from_tss(ist
+ 3);
946 esp
= get_rsp_from_tss(dpl
);
947 esp
&= ~0xfLL
; /* align stack */
950 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
951 /* to same privilege */
952 if (env
->eflags
& VM_MASK
)
953 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
956 esp
= get_rsp_from_tss(ist
+ 3);
959 esp
&= ~0xfLL
; /* align stack */
962 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
963 new_stack
= 0; /* avoid warning */
964 esp
= 0; /* avoid warning */
967 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
969 PUSHQ(esp
, compute_eflags());
970 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
972 if (has_error_code
) {
973 PUSHQ(esp
, error_code
);
978 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
982 selector
= (selector
& ~3) | dpl
;
983 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
984 get_seg_base(e1
, e2
),
985 get_seg_limit(e1
, e2
),
987 cpu_x86_set_cpl(env
, dpl
);
990 /* interrupt gate clear IF mask */
991 if ((type
& 1) == 0) {
992 env
->eflags
&= ~IF_MASK
;
994 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
999 #if defined(CONFIG_USER_ONLY)
1000 void helper_syscall(int next_eip_addend
)
1002 env
->exception_index
= EXCP_SYSCALL
;
1003 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1007 void helper_syscall(int next_eip_addend
)
1011 if (!(env
->efer
& MSR_EFER_SCE
)) {
1012 raise_exception_err(EXCP06_ILLOP
, 0);
1014 selector
= (env
->star
>> 32) & 0xffff;
1015 if (env
->hflags
& HF_LMA_MASK
) {
1018 ECX
= env
->eip
+ next_eip_addend
;
1019 env
->regs
[11] = compute_eflags();
1021 code64
= env
->hflags
& HF_CS64_MASK
;
1023 cpu_x86_set_cpl(env
, 0);
1024 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1026 DESC_G_MASK
| DESC_P_MASK
|
1028 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1029 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1031 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1033 DESC_W_MASK
| DESC_A_MASK
);
1034 env
->eflags
&= ~env
->fmask
;
1035 load_eflags(env
->eflags
, 0);
1037 env
->eip
= env
->lstar
;
1039 env
->eip
= env
->cstar
;
1041 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1043 cpu_x86_set_cpl(env
, 0);
1044 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1046 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1048 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1049 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1051 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1053 DESC_W_MASK
| DESC_A_MASK
);
1054 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1055 env
->eip
= (uint32_t)env
->star
;
1061 #ifdef TARGET_X86_64
1062 void helper_sysret(int dflag
)
1066 if (!(env
->efer
& MSR_EFER_SCE
)) {
1067 raise_exception_err(EXCP06_ILLOP
, 0);
1069 cpl
= env
->hflags
& HF_CPL_MASK
;
1070 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1071 raise_exception_err(EXCP0D_GPF
, 0);
1073 selector
= (env
->star
>> 48) & 0xffff;
1074 if (env
->hflags
& HF_LMA_MASK
) {
1076 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1078 DESC_G_MASK
| DESC_P_MASK
|
1079 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1080 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1084 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1086 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1087 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1088 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1089 env
->eip
= (uint32_t)ECX
;
1091 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1093 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1094 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1095 DESC_W_MASK
| DESC_A_MASK
);
1096 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1097 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1098 cpu_x86_set_cpl(env
, 3);
1100 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1102 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1103 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1104 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1105 env
->eip
= (uint32_t)ECX
;
1106 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1108 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1109 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1110 DESC_W_MASK
| DESC_A_MASK
);
1111 env
->eflags
|= IF_MASK
;
1112 cpu_x86_set_cpl(env
, 3);
1117 /* real mode interrupt */
1118 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1119 unsigned int next_eip
)
1122 target_ulong ptr
, ssp
;
1124 uint32_t offset
, esp
;
1125 uint32_t old_cs
, old_eip
;
1127 /* real mode (simpler !) */
1129 if (intno
* 4 + 3 > dt
->limit
)
1130 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1131 ptr
= dt
->base
+ intno
* 4;
1132 offset
= lduw_kernel(ptr
);
1133 selector
= lduw_kernel(ptr
+ 2);
1135 ssp
= env
->segs
[R_SS
].base
;
1140 old_cs
= env
->segs
[R_CS
].selector
;
1141 /* XXX: use SS segment size ? */
1142 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1143 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1144 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1146 /* update processor state */
1147 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1149 env
->segs
[R_CS
].selector
= selector
;
1150 env
->segs
[R_CS
].base
= (selector
<< 4);
1151 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1154 /* fake user mode interrupt */
1155 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1156 target_ulong next_eip
)
1160 int dpl
, cpl
, shift
;
1164 if (env
->hflags
& HF_LMA_MASK
) {
1169 ptr
= dt
->base
+ (intno
<< shift
);
1170 e2
= ldl_kernel(ptr
+ 4);
1172 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1173 cpl
= env
->hflags
& HF_CPL_MASK
;
1174 /* check privilege if software int */
1175 if (is_int
&& dpl
< cpl
)
1176 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1178 /* Since we emulate only user space, we cannot do more than
1179 exiting the emulation with the suitable exception and error
1185 #if !defined(CONFIG_USER_ONLY)
1186 static void handle_even_inj(int intno
, int is_int
, int error_code
,
1189 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1190 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1193 type
= SVM_EVTINJ_TYPE_SOFT
;
1195 type
= SVM_EVTINJ_TYPE_EXEPT
;
1196 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1197 if (!rm
&& exeption_has_error_code(intno
)) {
1198 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1199 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
), error_code
);
1201 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
);
1207 * Begin execution of an interruption. is_int is TRUE if coming from
1208 * the int instruction. next_eip is the EIP value AFTER the interrupt
1209 * instruction. It is only relevant if is_int is TRUE.
1211 void do_interrupt(int intno
, int is_int
, int error_code
,
1212 target_ulong next_eip
, int is_hw
)
1214 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1215 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1217 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1218 count
, intno
, error_code
, is_int
,
1219 env
->hflags
& HF_CPL_MASK
,
1220 env
->segs
[R_CS
].selector
, EIP
,
1221 (int)env
->segs
[R_CS
].base
+ EIP
,
1222 env
->segs
[R_SS
].selector
, ESP
);
1223 if (intno
== 0x0e) {
1224 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1226 qemu_log(" EAX=" TARGET_FMT_lx
, EAX
);
1229 log_cpu_state(env
, X86_DUMP_CCOP
);
1235 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1236 for(i
= 0; i
< 16; i
++) {
1237 qemu_log(" %02x", ldub(ptr
+ i
));
1245 if (env
->cr
[0] & CR0_PE_MASK
) {
1246 #if !defined(CONFIG_USER_ONLY)
1247 if (env
->hflags
& HF_SVMI_MASK
)
1248 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 0);
1250 #ifdef TARGET_X86_64
1251 if (env
->hflags
& HF_LMA_MASK
) {
1252 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1256 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1259 #if !defined(CONFIG_USER_ONLY)
1260 if (env
->hflags
& HF_SVMI_MASK
)
1261 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 1);
1263 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1266 #if !defined(CONFIG_USER_ONLY)
1267 if (env
->hflags
& HF_SVMI_MASK
) {
1268 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1269 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
1274 /* This should come from sysemu.h - if we could include it here... */
1275 void qemu_system_reset_request(void);
1278 * Check nested exceptions and change to double or triple fault if
1279 * needed. It should only be called, if this is not an interrupt.
1280 * Returns the new exception number.
1282 static int check_exception(int intno
, int *error_code
)
1284 int first_contributory
= env
->old_exception
== 0 ||
1285 (env
->old_exception
>= 10 &&
1286 env
->old_exception
<= 13);
1287 int second_contributory
= intno
== 0 ||
1288 (intno
>= 10 && intno
<= 13);
1290 qemu_log_mask(CPU_LOG_INT
, "check_exception old: 0x%x new 0x%x\n",
1291 env
->old_exception
, intno
);
1293 #if !defined(CONFIG_USER_ONLY)
1294 if (env
->old_exception
== EXCP08_DBLE
) {
1295 if (env
->hflags
& HF_SVMI_MASK
)
1296 helper_vmexit(SVM_EXIT_SHUTDOWN
, 0); /* does not return */
1298 qemu_log_mask(CPU_LOG_RESET
, "Triple fault\n");
1300 qemu_system_reset_request();
1305 if ((first_contributory
&& second_contributory
)
1306 || (env
->old_exception
== EXCP0E_PAGE
&&
1307 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1308 intno
= EXCP08_DBLE
;
1312 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1313 (intno
== EXCP08_DBLE
))
1314 env
->old_exception
= intno
;
1320 * Signal an interruption. It is executed in the main CPU loop.
1321 * is_int is TRUE if coming from the int instruction. next_eip is the
1322 * EIP value AFTER the interrupt instruction. It is only relevant if
1325 static void QEMU_NORETURN
raise_interrupt(int intno
, int is_int
, int error_code
,
1326 int next_eip_addend
)
1329 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1330 intno
= check_exception(intno
, &error_code
);
1332 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
1335 env
->exception_index
= intno
;
1336 env
->error_code
= error_code
;
1337 env
->exception_is_int
= is_int
;
1338 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1342 /* shortcuts to generate exceptions */
1344 void raise_exception_err(int exception_index
, int error_code
)
1346 raise_interrupt(exception_index
, 0, error_code
, 0);
1349 void raise_exception(int exception_index
)
1351 raise_interrupt(exception_index
, 0, 0, 0);
1356 #if defined(CONFIG_USER_ONLY)
1358 void do_smm_enter(void)
1362 void helper_rsm(void)
1368 #ifdef TARGET_X86_64
1369 #define SMM_REVISION_ID 0x00020064
1371 #define SMM_REVISION_ID 0x00020000
1374 void do_smm_enter(void)
1376 target_ulong sm_state
;
1380 qemu_log_mask(CPU_LOG_INT
, "SMM: enter\n");
1381 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1383 env
->hflags
|= HF_SMM_MASK
;
1384 cpu_smm_update(env
);
1386 sm_state
= env
->smbase
+ 0x8000;
1388 #ifdef TARGET_X86_64
1389 for(i
= 0; i
< 6; i
++) {
1391 offset
= 0x7e00 + i
* 16;
1392 stw_phys(sm_state
+ offset
, dt
->selector
);
1393 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1394 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1395 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1398 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1399 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1401 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1402 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1403 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1404 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1406 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1407 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1409 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1410 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1411 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1412 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1414 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1416 stq_phys(sm_state
+ 0x7ff8, EAX
);
1417 stq_phys(sm_state
+ 0x7ff0, ECX
);
1418 stq_phys(sm_state
+ 0x7fe8, EDX
);
1419 stq_phys(sm_state
+ 0x7fe0, EBX
);
1420 stq_phys(sm_state
+ 0x7fd8, ESP
);
1421 stq_phys(sm_state
+ 0x7fd0, EBP
);
1422 stq_phys(sm_state
+ 0x7fc8, ESI
);
1423 stq_phys(sm_state
+ 0x7fc0, EDI
);
1424 for(i
= 8; i
< 16; i
++)
1425 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1426 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1427 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1428 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1429 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1431 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1432 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1433 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1435 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1436 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1438 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1439 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1440 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1441 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1442 stl_phys(sm_state
+ 0x7fec, EDI
);
1443 stl_phys(sm_state
+ 0x7fe8, ESI
);
1444 stl_phys(sm_state
+ 0x7fe4, EBP
);
1445 stl_phys(sm_state
+ 0x7fe0, ESP
);
1446 stl_phys(sm_state
+ 0x7fdc, EBX
);
1447 stl_phys(sm_state
+ 0x7fd8, EDX
);
1448 stl_phys(sm_state
+ 0x7fd4, ECX
);
1449 stl_phys(sm_state
+ 0x7fd0, EAX
);
1450 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1451 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1453 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1454 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1455 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1456 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1458 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1459 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1460 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1461 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1463 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1464 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1466 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1467 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1469 for(i
= 0; i
< 6; i
++) {
1472 offset
= 0x7f84 + i
* 12;
1474 offset
= 0x7f2c + (i
- 3) * 12;
1475 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1476 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1477 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1478 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1480 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1482 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1483 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1485 /* init SMM cpu state */
1487 #ifdef TARGET_X86_64
1488 cpu_load_efer(env
, 0);
1490 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1491 env
->eip
= 0x00008000;
1492 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1494 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1495 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1496 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1497 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1498 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1500 cpu_x86_update_cr0(env
,
1501 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1502 cpu_x86_update_cr4(env
, 0);
1503 env
->dr
[7] = 0x00000400;
1504 CC_OP
= CC_OP_EFLAGS
;
1507 void helper_rsm(void)
1509 target_ulong sm_state
;
1513 sm_state
= env
->smbase
+ 0x8000;
1514 #ifdef TARGET_X86_64
1515 cpu_load_efer(env
, ldq_phys(sm_state
+ 0x7ed0));
1517 for(i
= 0; i
< 6; i
++) {
1518 offset
= 0x7e00 + i
* 16;
1519 cpu_x86_load_seg_cache(env
, i
,
1520 lduw_phys(sm_state
+ offset
),
1521 ldq_phys(sm_state
+ offset
+ 8),
1522 ldl_phys(sm_state
+ offset
+ 4),
1523 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1526 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1527 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1529 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1530 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1531 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1532 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1534 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1535 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1537 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1538 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1539 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1540 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1542 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1543 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1544 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1545 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1546 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1547 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1548 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1549 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1550 for(i
= 8; i
< 16; i
++)
1551 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1552 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1553 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1554 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1555 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1556 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1558 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1559 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1560 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1562 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1563 if (val
& 0x20000) {
1564 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1567 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1568 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1569 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1570 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1571 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1572 EDI
= ldl_phys(sm_state
+ 0x7fec);
1573 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1574 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1575 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1576 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1577 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1578 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1579 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1580 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1581 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1583 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1584 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1585 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1586 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1588 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1589 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1590 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1591 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1593 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1594 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1596 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1597 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1599 for(i
= 0; i
< 6; i
++) {
1601 offset
= 0x7f84 + i
* 12;
1603 offset
= 0x7f2c + (i
- 3) * 12;
1604 cpu_x86_load_seg_cache(env
, i
,
1605 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1606 ldl_phys(sm_state
+ offset
+ 8),
1607 ldl_phys(sm_state
+ offset
+ 4),
1608 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1610 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1612 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1613 if (val
& 0x20000) {
1614 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1617 CC_OP
= CC_OP_EFLAGS
;
1618 env
->hflags
&= ~HF_SMM_MASK
;
1619 cpu_smm_update(env
);
1621 qemu_log_mask(CPU_LOG_INT
, "SMM: after RSM\n");
1622 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1625 #endif /* !CONFIG_USER_ONLY */
1628 /* division, flags are undefined */
1630 void helper_divb_AL(target_ulong t0
)
1632 unsigned int num
, den
, q
, r
;
1634 num
= (EAX
& 0xffff);
1637 raise_exception(EXCP00_DIVZ
);
1641 raise_exception(EXCP00_DIVZ
);
1643 r
= (num
% den
) & 0xff;
1644 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1647 void helper_idivb_AL(target_ulong t0
)
1654 raise_exception(EXCP00_DIVZ
);
1658 raise_exception(EXCP00_DIVZ
);
1660 r
= (num
% den
) & 0xff;
1661 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1664 void helper_divw_AX(target_ulong t0
)
1666 unsigned int num
, den
, q
, r
;
1668 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1669 den
= (t0
& 0xffff);
1671 raise_exception(EXCP00_DIVZ
);
1675 raise_exception(EXCP00_DIVZ
);
1677 r
= (num
% den
) & 0xffff;
1678 EAX
= (EAX
& ~0xffff) | q
;
1679 EDX
= (EDX
& ~0xffff) | r
;
1682 void helper_idivw_AX(target_ulong t0
)
1686 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1689 raise_exception(EXCP00_DIVZ
);
1692 if (q
!= (int16_t)q
)
1693 raise_exception(EXCP00_DIVZ
);
1695 r
= (num
% den
) & 0xffff;
1696 EAX
= (EAX
& ~0xffff) | q
;
1697 EDX
= (EDX
& ~0xffff) | r
;
1700 void helper_divl_EAX(target_ulong t0
)
1702 unsigned int den
, r
;
1705 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1708 raise_exception(EXCP00_DIVZ
);
1713 raise_exception(EXCP00_DIVZ
);
1718 void helper_idivl_EAX(target_ulong t0
)
1723 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1726 raise_exception(EXCP00_DIVZ
);
1730 if (q
!= (int32_t)q
)
1731 raise_exception(EXCP00_DIVZ
);
1738 /* XXX: exception */
1739 void helper_aam(int base
)
1745 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1749 void helper_aad(int base
)
1753 ah
= (EAX
>> 8) & 0xff;
1754 al
= ((ah
* base
) + al
) & 0xff;
1755 EAX
= (EAX
& ~0xffff) | al
;
1759 void helper_aaa(void)
1765 eflags
= helper_cc_compute_all(CC_OP
);
1768 ah
= (EAX
>> 8) & 0xff;
1770 icarry
= (al
> 0xf9);
1771 if (((al
& 0x0f) > 9 ) || af
) {
1772 al
= (al
+ 6) & 0x0f;
1773 ah
= (ah
+ 1 + icarry
) & 0xff;
1774 eflags
|= CC_C
| CC_A
;
1776 eflags
&= ~(CC_C
| CC_A
);
1779 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1783 void helper_aas(void)
1789 eflags
= helper_cc_compute_all(CC_OP
);
1792 ah
= (EAX
>> 8) & 0xff;
1795 if (((al
& 0x0f) > 9 ) || af
) {
1796 al
= (al
- 6) & 0x0f;
1797 ah
= (ah
- 1 - icarry
) & 0xff;
1798 eflags
|= CC_C
| CC_A
;
1800 eflags
&= ~(CC_C
| CC_A
);
1803 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1807 void helper_daa(void)
1812 eflags
= helper_cc_compute_all(CC_OP
);
1818 if (((al
& 0x0f) > 9 ) || af
) {
1819 al
= (al
+ 6) & 0xff;
1822 if ((al
> 0x9f) || cf
) {
1823 al
= (al
+ 0x60) & 0xff;
1826 EAX
= (EAX
& ~0xff) | al
;
1827 /* well, speed is not an issue here, so we compute the flags by hand */
1828 eflags
|= (al
== 0) << 6; /* zf */
1829 eflags
|= parity_table
[al
]; /* pf */
1830 eflags
|= (al
& 0x80); /* sf */
1834 void helper_das(void)
1836 int al
, al1
, af
, cf
;
1839 eflags
= helper_cc_compute_all(CC_OP
);
1846 if (((al
& 0x0f) > 9 ) || af
) {
1850 al
= (al
- 6) & 0xff;
1852 if ((al1
> 0x99) || cf
) {
1853 al
= (al
- 0x60) & 0xff;
1856 EAX
= (EAX
& ~0xff) | al
;
1857 /* well, speed is not an issue here, so we compute the flags by hand */
1858 eflags
|= (al
== 0) << 6; /* zf */
1859 eflags
|= parity_table
[al
]; /* pf */
1860 eflags
|= (al
& 0x80); /* sf */
1864 void helper_into(int next_eip_addend
)
1867 eflags
= helper_cc_compute_all(CC_OP
);
1868 if (eflags
& CC_O
) {
1869 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1873 void helper_cmpxchg8b(target_ulong a0
)
1878 eflags
= helper_cc_compute_all(CC_OP
);
1880 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1881 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1884 /* always do the store */
1886 EDX
= (uint32_t)(d
>> 32);
1893 #ifdef TARGET_X86_64
1894 void helper_cmpxchg16b(target_ulong a0
)
1899 if ((a0
& 0xf) != 0)
1900 raise_exception(EXCP0D_GPF
);
1901 eflags
= helper_cc_compute_all(CC_OP
);
1904 if (d0
== EAX
&& d1
== EDX
) {
1909 /* always do the store */
1920 void helper_single_step(void)
1922 #ifndef CONFIG_USER_ONLY
1923 check_hw_breakpoints(env
, 1);
1924 env
->dr
[6] |= DR6_BS
;
1926 raise_exception(EXCP01_DB
);
1929 void helper_cpuid(void)
1931 uint32_t eax
, ebx
, ecx
, edx
;
1933 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1935 cpu_x86_cpuid(env
, (uint32_t)EAX
, (uint32_t)ECX
, &eax
, &ebx
, &ecx
, &edx
);
1942 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1945 uint32_t esp_mask
, esp
, ebp
;
1947 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1948 ssp
= env
->segs
[R_SS
].base
;
1957 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1960 stl(ssp
+ (esp
& esp_mask
), t1
);
1967 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1970 stw(ssp
+ (esp
& esp_mask
), t1
);
1974 #ifdef TARGET_X86_64
1975 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
1977 target_ulong esp
, ebp
;
1997 stw(esp
, lduw(ebp
));
2005 void helper_lldt(int selector
)
2009 int index
, entry_limit
;
2013 if ((selector
& 0xfffc) == 0) {
2014 /* XXX: NULL selector case: invalid LDT */
2019 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2021 index
= selector
& ~7;
2022 #ifdef TARGET_X86_64
2023 if (env
->hflags
& HF_LMA_MASK
)
2028 if ((index
+ entry_limit
) > dt
->limit
)
2029 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2030 ptr
= dt
->base
+ index
;
2031 e1
= ldl_kernel(ptr
);
2032 e2
= ldl_kernel(ptr
+ 4);
2033 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2034 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2035 if (!(e2
& DESC_P_MASK
))
2036 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2037 #ifdef TARGET_X86_64
2038 if (env
->hflags
& HF_LMA_MASK
) {
2040 e3
= ldl_kernel(ptr
+ 8);
2041 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2042 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2046 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2049 env
->ldt
.selector
= selector
;
2052 void helper_ltr(int selector
)
2056 int index
, type
, entry_limit
;
2060 if ((selector
& 0xfffc) == 0) {
2061 /* NULL selector case: invalid TR */
2067 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2069 index
= selector
& ~7;
2070 #ifdef TARGET_X86_64
2071 if (env
->hflags
& HF_LMA_MASK
)
2076 if ((index
+ entry_limit
) > dt
->limit
)
2077 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2078 ptr
= dt
->base
+ index
;
2079 e1
= ldl_kernel(ptr
);
2080 e2
= ldl_kernel(ptr
+ 4);
2081 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2082 if ((e2
& DESC_S_MASK
) ||
2083 (type
!= 1 && type
!= 9))
2084 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2085 if (!(e2
& DESC_P_MASK
))
2086 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2087 #ifdef TARGET_X86_64
2088 if (env
->hflags
& HF_LMA_MASK
) {
2090 e3
= ldl_kernel(ptr
+ 8);
2091 e4
= ldl_kernel(ptr
+ 12);
2092 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2093 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2094 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2095 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2099 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2101 e2
|= DESC_TSS_BUSY_MASK
;
2102 stl_kernel(ptr
+ 4, e2
);
2104 env
->tr
.selector
= selector
;
2107 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2108 void helper_load_seg(int seg_reg
, int selector
)
2117 cpl
= env
->hflags
& HF_CPL_MASK
;
2118 if ((selector
& 0xfffc) == 0) {
2119 /* null selector case */
2121 #ifdef TARGET_X86_64
2122 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2125 raise_exception_err(EXCP0D_GPF
, 0);
2126 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2133 index
= selector
& ~7;
2134 if ((index
+ 7) > dt
->limit
)
2135 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2136 ptr
= dt
->base
+ index
;
2137 e1
= ldl_kernel(ptr
);
2138 e2
= ldl_kernel(ptr
+ 4);
2140 if (!(e2
& DESC_S_MASK
))
2141 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2143 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2144 if (seg_reg
== R_SS
) {
2145 /* must be writable segment */
2146 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2147 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2148 if (rpl
!= cpl
|| dpl
!= cpl
)
2149 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2151 /* must be readable segment */
2152 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2153 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2155 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2156 /* if not conforming code, test rights */
2157 if (dpl
< cpl
|| dpl
< rpl
)
2158 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2162 if (!(e2
& DESC_P_MASK
)) {
2163 if (seg_reg
== R_SS
)
2164 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2166 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2169 /* set the access bit if not already set */
2170 if (!(e2
& DESC_A_MASK
)) {
2172 stl_kernel(ptr
+ 4, e2
);
2175 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2176 get_seg_base(e1
, e2
),
2177 get_seg_limit(e1
, e2
),
2180 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2181 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2186 /* protected mode jump */
2187 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2188 int next_eip_addend
)
2191 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2192 target_ulong next_eip
;
2194 if ((new_cs
& 0xfffc) == 0)
2195 raise_exception_err(EXCP0D_GPF
, 0);
2196 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2197 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2198 cpl
= env
->hflags
& HF_CPL_MASK
;
2199 if (e2
& DESC_S_MASK
) {
2200 if (!(e2
& DESC_CS_MASK
))
2201 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2202 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2203 if (e2
& DESC_C_MASK
) {
2204 /* conforming code segment */
2206 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2208 /* non conforming code segment */
2211 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2213 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2215 if (!(e2
& DESC_P_MASK
))
2216 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2217 limit
= get_seg_limit(e1
, e2
);
2218 if (new_eip
> limit
&&
2219 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2220 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2221 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2222 get_seg_base(e1
, e2
), limit
, e2
);
2225 /* jump to call or task gate */
2226 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2228 cpl
= env
->hflags
& HF_CPL_MASK
;
2229 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2231 case 1: /* 286 TSS */
2232 case 9: /* 386 TSS */
2233 case 5: /* task gate */
2234 if (dpl
< cpl
|| dpl
< rpl
)
2235 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2236 next_eip
= env
->eip
+ next_eip_addend
;
2237 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2238 CC_OP
= CC_OP_EFLAGS
;
2240 case 4: /* 286 call gate */
2241 case 12: /* 386 call gate */
2242 if ((dpl
< cpl
) || (dpl
< rpl
))
2243 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2244 if (!(e2
& DESC_P_MASK
))
2245 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2247 new_eip
= (e1
& 0xffff);
2249 new_eip
|= (e2
& 0xffff0000);
2250 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2251 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2252 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2253 /* must be code segment */
2254 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2255 (DESC_S_MASK
| DESC_CS_MASK
)))
2256 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2257 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2258 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2259 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2260 if (!(e2
& DESC_P_MASK
))
2261 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2262 limit
= get_seg_limit(e1
, e2
);
2263 if (new_eip
> limit
)
2264 raise_exception_err(EXCP0D_GPF
, 0);
2265 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2266 get_seg_base(e1
, e2
), limit
, e2
);
2270 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
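/*
 * Legacy far CALL frame, as pushed by the helpers in this file (16- or
 * 32-bit slots depending on the operand-size 'shift'):
 *
 *      | old CS          |  <- higher addresses
 *      | return EIP      |  <- new ESP after the pushes
 *
 * When a call gate switches to an inner privilege level, the old SS:ESP and
 * up to 31 parameter words are pushed on the new stack before this frame.
 */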
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */
        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
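/*
 * Rough summary of the call-gate path above: for a transfer to a more
 * privileged non-conforming segment ("to inner privilege"), the new SS:ESP
 * is fetched from the TSS for the target DPL, param_count words are copied
 * from the old stack to the new one, the old SS:ESP and CS:EIP are pushed,
 * and CPL is lowered to the target DPL.  The same-privilege branch only
 * pushes CS:EIP on the current stack.
 */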
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
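/*
 * The eflags_mask computed above follows the architectural IRET rules:
 * IOPL is never writable from vm86 mode, the 16-bit form restores only the
 * low half of EFLAGS, and clearing HF2_NMI_MASK re-enables NMI delivery,
 * which is blocked between an NMI and the next IRET.
 */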
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
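/*
 * validate_seg() models the data-segment invalidation a real CPU performs
 * when returning to an outer privilege level: any of ES/DS/FS/GS whose DPL
 * is lower than the new CPL is forced to the NULL selector, so the less
 * privileged code cannot keep using a descriptor it could not load itself.
 */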
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
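/*
 * The return_to_vm86 path corresponds to an IRET from the vm86 monitor
 * (CPL 0) back into virtual-8086 mode: the 32-bit frame also carries ESP,
 * SS and the four data segment selectors, which are reloaded as real-mode
 * style segments (base = selector << 4) and CPL is forced back to 3.
 */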
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
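/*
 * SYSENTER takes its entire target state from MSRs rather than from the
 * GDT: CS comes from IA32_SYSENTER_CS, SS is implicitly CS + 8, and
 * ESP/EIP come from IA32_SYSENTER_ESP/EIP.  The flat 4 GB segments loaded
 * above reproduce that behaviour without any descriptor table access.
 */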
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
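/*
 * SYSEXIT mirrors SYSENTER: the user-mode CS/SS selectors are derived from
 * IA32_SYSENTER_CS (+16/+24 for the 32-bit form, +32/+40 for the 64-bit
 * form), both forced to privilege level 3, while ECX and EDX supply the
 * new ESP and EIP.
 */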
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif
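/*
 * DR0-DR3 writes go through hw_breakpoint_remove()/hw_breakpoint_insert()
 * so QEMU's breakpoint state stays in sync with the guest debug registers;
 * a DR7 write re-registers all four breakpoints because DR7 holds their
 * enable bits and length/type fields.
 */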
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
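/*
 * RDTSC and RDPMC are only conditionally privileged: CR4.TSD / CR4.PCE
 * restrict them to CPL 0, otherwise #GP(0) is raised.  The TSC value
 * returned includes tsc_offset so that an SVM guest observes the offset
 * programmed in its VMCB.
 */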
2983 #if defined(CONFIG_USER_ONLY)
2984 void helper_wrmsr(void)
2988 void helper_rdmsr(void)
2992 void helper_wrmsr(void)
2996 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
2998 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3000 switch((uint32_t)ECX
) {
3001 case MSR_IA32_SYSENTER_CS
:
3002 env
->sysenter_cs
= val
& 0xffff;
3004 case MSR_IA32_SYSENTER_ESP
:
3005 env
->sysenter_esp
= val
;
3007 case MSR_IA32_SYSENTER_EIP
:
3008 env
->sysenter_eip
= val
;
3010 case MSR_IA32_APICBASE
:
3011 cpu_set_apic_base(env
, val
);
3015 uint64_t update_mask
;
3017 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3018 update_mask
|= MSR_EFER_SCE
;
3019 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3020 update_mask
|= MSR_EFER_LME
;
3021 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3022 update_mask
|= MSR_EFER_FFXSR
;
3023 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3024 update_mask
|= MSR_EFER_NXE
;
3025 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3026 update_mask
|= MSR_EFER_SVME
;
3027 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3028 update_mask
|= MSR_EFER_FFXSR
;
3029 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3030 (val
& update_mask
));
3039 case MSR_VM_HSAVE_PA
:
3040 env
->vm_hsave
= val
;
3042 #ifdef TARGET_X86_64
3053 env
->segs
[R_FS
].base
= val
;
3056 env
->segs
[R_GS
].base
= val
;
3058 case MSR_KERNELGSBASE
:
3059 env
->kernelgsbase
= val
;
3062 case MSR_MTRRphysBase(0):
3063 case MSR_MTRRphysBase(1):
3064 case MSR_MTRRphysBase(2):
3065 case MSR_MTRRphysBase(3):
3066 case MSR_MTRRphysBase(4):
3067 case MSR_MTRRphysBase(5):
3068 case MSR_MTRRphysBase(6):
3069 case MSR_MTRRphysBase(7):
3070 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3072 case MSR_MTRRphysMask(0):
3073 case MSR_MTRRphysMask(1):
3074 case MSR_MTRRphysMask(2):
3075 case MSR_MTRRphysMask(3):
3076 case MSR_MTRRphysMask(4):
3077 case MSR_MTRRphysMask(5):
3078 case MSR_MTRRphysMask(6):
3079 case MSR_MTRRphysMask(7):
3080 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3082 case MSR_MTRRfix64K_00000
:
3083 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3085 case MSR_MTRRfix16K_80000
:
3086 case MSR_MTRRfix16K_A0000
:
3087 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3089 case MSR_MTRRfix4K_C0000
:
3090 case MSR_MTRRfix4K_C8000
:
3091 case MSR_MTRRfix4K_D0000
:
3092 case MSR_MTRRfix4K_D8000
:
3093 case MSR_MTRRfix4K_E0000
:
3094 case MSR_MTRRfix4K_E8000
:
3095 case MSR_MTRRfix4K_F0000
:
3096 case MSR_MTRRfix4K_F8000
:
3097 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3099 case MSR_MTRRdefType
:
3100 env
->mtrr_deftype
= val
;
3102 case MSR_MCG_STATUS
:
3103 env
->mcg_status
= val
;
3106 if ((env
->mcg_cap
& MCG_CTL_P
)
3107 && (val
== 0 || val
== ~(uint64_t)0))
3111 if ((uint32_t)ECX
>= MSR_MC0_CTL
3112 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3113 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3114 if ((offset
& 0x3) != 0
3115 || (val
== 0 || val
== ~(uint64_t)0))
3116 env
->mce_banks
[offset
] = val
;
3119 /* XXX: exception ? */
3124 void helper_rdmsr(void)
3128 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3130 switch((uint32_t)ECX
) {
3131 case MSR_IA32_SYSENTER_CS
:
3132 val
= env
->sysenter_cs
;
3134 case MSR_IA32_SYSENTER_ESP
:
3135 val
= env
->sysenter_esp
;
3137 case MSR_IA32_SYSENTER_EIP
:
3138 val
= env
->sysenter_eip
;
3140 case MSR_IA32_APICBASE
:
3141 val
= cpu_get_apic_base(env
);
3152 case MSR_VM_HSAVE_PA
:
3153 val
= env
->vm_hsave
;
3155 case MSR_IA32_PERF_STATUS
:
3156 /* tsc_increment_by_tick */
3158 /* CPU multiplier */
3159 val
|= (((uint64_t)4ULL) << 40);
3161 #ifdef TARGET_X86_64
3172 val
= env
->segs
[R_FS
].base
;
3175 val
= env
->segs
[R_GS
].base
;
3177 case MSR_KERNELGSBASE
:
3178 val
= env
->kernelgsbase
;
3181 case MSR_MTRRphysBase(0):
3182 case MSR_MTRRphysBase(1):
3183 case MSR_MTRRphysBase(2):
3184 case MSR_MTRRphysBase(3):
3185 case MSR_MTRRphysBase(4):
3186 case MSR_MTRRphysBase(5):
3187 case MSR_MTRRphysBase(6):
3188 case MSR_MTRRphysBase(7):
3189 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3191 case MSR_MTRRphysMask(0):
3192 case MSR_MTRRphysMask(1):
3193 case MSR_MTRRphysMask(2):
3194 case MSR_MTRRphysMask(3):
3195 case MSR_MTRRphysMask(4):
3196 case MSR_MTRRphysMask(5):
3197 case MSR_MTRRphysMask(6):
3198 case MSR_MTRRphysMask(7):
3199 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3201 case MSR_MTRRfix64K_00000
:
3202 val
= env
->mtrr_fixed
[0];
3204 case MSR_MTRRfix16K_80000
:
3205 case MSR_MTRRfix16K_A0000
:
3206 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3208 case MSR_MTRRfix4K_C0000
:
3209 case MSR_MTRRfix4K_C8000
:
3210 case MSR_MTRRfix4K_D0000
:
3211 case MSR_MTRRfix4K_D8000
:
3212 case MSR_MTRRfix4K_E0000
:
3213 case MSR_MTRRfix4K_E8000
:
3214 case MSR_MTRRfix4K_F0000
:
3215 case MSR_MTRRfix4K_F8000
:
3216 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3218 case MSR_MTRRdefType
:
3219 val
= env
->mtrr_deftype
;
3222 if (env
->cpuid_features
& CPUID_MTRR
)
3223 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3225 /* XXX: exception ? */
3232 if (env
->mcg_cap
& MCG_CTL_P
)
3237 case MSR_MCG_STATUS
:
3238 val
= env
->mcg_status
;
3241 if ((uint32_t)ECX
>= MSR_MC0_CTL
3242 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3243 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3244 val
= env
->mce_banks
[offset
];
3247 /* XXX: exception ? */
3251 EAX
= (uint32_t)(val
);
3252 EDX
= (uint32_t)(val
>> 32);
3256 target_ulong
helper_lsl(target_ulong selector1
)
3259 uint32_t e1
, e2
, eflags
, selector
;
3260 int rpl
, dpl
, cpl
, type
;
3262 selector
= selector1
& 0xffff;
3263 eflags
= helper_cc_compute_all(CC_OP
);
3264 if ((selector
& 0xfffc) == 0)
3266 if (load_segment(&e1
, &e2
, selector
) != 0)
3269 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3270 cpl
= env
->hflags
& HF_CPL_MASK
;
3271 if (e2
& DESC_S_MASK
) {
3272 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3275 if (dpl
< cpl
|| dpl
< rpl
)
3279 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3290 if (dpl
< cpl
|| dpl
< rpl
) {
3292 CC_SRC
= eflags
& ~CC_Z
;
3296 limit
= get_seg_limit(e1
, e2
);
3297 CC_SRC
= eflags
| CC_Z
;
3301 target_ulong
helper_lar(target_ulong selector1
)
3303 uint32_t e1
, e2
, eflags
, selector
;
3304 int rpl
, dpl
, cpl
, type
;
3306 selector
= selector1
& 0xffff;
3307 eflags
= helper_cc_compute_all(CC_OP
);
3308 if ((selector
& 0xfffc) == 0)
3310 if (load_segment(&e1
, &e2
, selector
) != 0)
3313 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3314 cpl
= env
->hflags
& HF_CPL_MASK
;
3315 if (e2
& DESC_S_MASK
) {
3316 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3319 if (dpl
< cpl
|| dpl
< rpl
)
3323 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3337 if (dpl
< cpl
|| dpl
< rpl
) {
3339 CC_SRC
= eflags
& ~CC_Z
;
3343 CC_SRC
= eflags
| CC_Z
;
3344 return e2
& 0x00f0ff00;
3347 void helper_verr(target_ulong selector1
)
3349 uint32_t e1
, e2
, eflags
, selector
;
3352 selector
= selector1
& 0xffff;
3353 eflags
= helper_cc_compute_all(CC_OP
);
3354 if ((selector
& 0xfffc) == 0)
3356 if (load_segment(&e1
, &e2
, selector
) != 0)
3358 if (!(e2
& DESC_S_MASK
))
3361 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3362 cpl
= env
->hflags
& HF_CPL_MASK
;
3363 if (e2
& DESC_CS_MASK
) {
3364 if (!(e2
& DESC_R_MASK
))
3366 if (!(e2
& DESC_C_MASK
)) {
3367 if (dpl
< cpl
|| dpl
< rpl
)
3371 if (dpl
< cpl
|| dpl
< rpl
) {
3373 CC_SRC
= eflags
& ~CC_Z
;
3377 CC_SRC
= eflags
| CC_Z
;
3380 void helper_verw(target_ulong selector1
)
3382 uint32_t e1
, e2
, eflags
, selector
;
3385 selector
= selector1
& 0xffff;
3386 eflags
= helper_cc_compute_all(CC_OP
);
3387 if ((selector
& 0xfffc) == 0)
3389 if (load_segment(&e1
, &e2
, selector
) != 0)
3391 if (!(e2
& DESC_S_MASK
))
3394 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3395 cpl
= env
->hflags
& HF_CPL_MASK
;
3396 if (e2
& DESC_CS_MASK
) {
3399 if (dpl
< cpl
|| dpl
< rpl
)
3401 if (!(e2
& DESC_W_MASK
)) {
3403 CC_SRC
= eflags
& ~CC_Z
;
3407 CC_SRC
= eflags
| CC_Z
;
3410 /* x87 FPU helpers */
3412 static void fpu_set_exception(int mask
)
3415 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3416 env
->fpus
|= FPUS_SE
| FPUS_B
;
3419 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3422 fpu_set_exception(FPUS_ZE
);
3426 static void fpu_raise_exception(void)
3428 if (env
->cr
[0] & CR0_NE_MASK
) {
3429 raise_exception(EXCP10_COPR
);
3431 #if !defined(CONFIG_USER_ONLY)
3438 void helper_flds_FT0(uint32_t val
)
3445 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3448 void helper_fldl_FT0(uint64_t val
)
3455 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3458 void helper_fildl_FT0(int32_t val
)
3460 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3463 void helper_flds_ST0(uint32_t val
)
3470 new_fpstt
= (env
->fpstt
- 1) & 7;
3472 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3473 env
->fpstt
= new_fpstt
;
3474 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3477 void helper_fldl_ST0(uint64_t val
)
3484 new_fpstt
= (env
->fpstt
- 1) & 7;
3486 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3487 env
->fpstt
= new_fpstt
;
3488 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3491 void helper_fildl_ST0(int32_t val
)
3494 new_fpstt
= (env
->fpstt
- 1) & 7;
3495 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3496 env
->fpstt
= new_fpstt
;
3497 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3500 void helper_fildll_ST0(int64_t val
)
3503 new_fpstt
= (env
->fpstt
- 1) & 7;
3504 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3505 env
->fpstt
= new_fpstt
;
3506 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3509 uint32_t helper_fsts_ST0(void)
3515 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3519 uint64_t helper_fstl_ST0(void)
3525 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3529 int32_t helper_fist_ST0(void)
3532 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3533 if (val
!= (int16_t)val
)
3538 int32_t helper_fistl_ST0(void)
3541 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3545 int64_t helper_fistll_ST0(void)
3548 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3552 int32_t helper_fistt_ST0(void)
3555 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3556 if (val
!= (int16_t)val
)
3561 int32_t helper_fisttl_ST0(void)
3564 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3568 int64_t helper_fisttll_ST0(void)
3571 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3575 void helper_fldt_ST0(target_ulong ptr
)
3578 new_fpstt
= (env
->fpstt
- 1) & 7;
3579 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3580 env
->fpstt
= new_fpstt
;
3581 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3584 void helper_fstt_ST0(target_ulong ptr
)
3586 helper_fstt(ST0
, ptr
);
3589 void helper_fpush(void)
3594 void helper_fpop(void)
3599 void helper_fdecstp(void)
3601 env
->fpstt
= (env
->fpstt
- 1) & 7;
3602 env
->fpus
&= (~0x4700);
3605 void helper_fincstp(void)
3607 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3608 env
->fpus
&= (~0x4700);
3613 void helper_ffree_STN(int st_index
)
3615 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3618 void helper_fmov_ST0_FT0(void)
3623 void helper_fmov_FT0_STN(int st_index
)
3628 void helper_fmov_ST0_STN(int st_index
)
3633 void helper_fmov_STN_ST0(int st_index
)
3638 void helper_fxchg_ST0_STN(int st_index
)
3646 /* FPU operations */
3648 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3650 void helper_fcom_ST0_FT0(void)
3654 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3655 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3658 void helper_fucom_ST0_FT0(void)
3662 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3663 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3666 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3668 void helper_fcomi_ST0_FT0(void)
3673 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3674 eflags
= helper_cc_compute_all(CC_OP
);
3675 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3679 void helper_fucomi_ST0_FT0(void)
3684 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3685 eflags
= helper_cc_compute_all(CC_OP
);
3686 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3690 void helper_fadd_ST0_FT0(void)
3695 void helper_fmul_ST0_FT0(void)
3700 void helper_fsub_ST0_FT0(void)
3705 void helper_fsubr_ST0_FT0(void)
3710 void helper_fdiv_ST0_FT0(void)
3712 ST0
= helper_fdiv(ST0
, FT0
);
3715 void helper_fdivr_ST0_FT0(void)
3717 ST0
= helper_fdiv(FT0
, ST0
);
3720 /* fp operations between STN and ST0 */
3722 void helper_fadd_STN_ST0(int st_index
)
3724 ST(st_index
) += ST0
;
3727 void helper_fmul_STN_ST0(int st_index
)
3729 ST(st_index
) *= ST0
;
3732 void helper_fsub_STN_ST0(int st_index
)
3734 ST(st_index
) -= ST0
;
3737 void helper_fsubr_STN_ST0(int st_index
)
3744 void helper_fdiv_STN_ST0(int st_index
)
3748 *p
= helper_fdiv(*p
, ST0
);
3751 void helper_fdivr_STN_ST0(int st_index
)
3755 *p
= helper_fdiv(ST0
, *p
);
3758 /* misc FPU operations */
3759 void helper_fchs_ST0(void)
3761 ST0
= floatx_chs(ST0
);
3764 void helper_fabs_ST0(void)
3766 ST0
= floatx_abs(ST0
);
3769 void helper_fld1_ST0(void)
3774 void helper_fldl2t_ST0(void)
3779 void helper_fldl2e_ST0(void)
3784 void helper_fldpi_ST0(void)
3789 void helper_fldlg2_ST0(void)
3794 void helper_fldln2_ST0(void)
3799 void helper_fldz_ST0(void)
3804 void helper_fldz_FT0(void)
3809 uint32_t helper_fnstsw(void)
3811 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3814 uint32_t helper_fnstcw(void)
3819 static void update_fp_status(void)
3823 /* set rounding mode */
3824 switch(env
->fpuc
& RC_MASK
) {
3827 rnd_type
= float_round_nearest_even
;
3830 rnd_type
= float_round_down
;
3833 rnd_type
= float_round_up
;
3836 rnd_type
= float_round_to_zero
;
3839 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3841 switch((env
->fpuc
>> 8) & 3) {
3853 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3857 void helper_fldcw(uint32_t val
)
3863 void helper_fclex(void)
3865 env
->fpus
&= 0x7f00;
3868 void helper_fwait(void)
3870 if (env
->fpus
& FPUS_SE
)
3871 fpu_raise_exception();
3874 void helper_fninit(void)
3891 void helper_fbld_ST0(target_ulong ptr
)
3899 for(i
= 8; i
>= 0; i
--) {
3901 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3904 if (ldub(ptr
+ 9) & 0x80)
3910 void helper_fbst_ST0(target_ulong ptr
)
3913 target_ulong mem_ref
, mem_end
;
3916 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3918 mem_end
= mem_ref
+ 9;
3925 while (mem_ref
< mem_end
) {
3930 v
= ((v
/ 10) << 4) | (v
% 10);
3933 while (mem_ref
< mem_end
) {
3938 void helper_f2xm1(void)
3940 ST0
= pow(2.0,ST0
) - 1.0;
3943 void helper_fyl2x(void)
3945 CPU86_LDouble fptemp
;
3949 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3953 env
->fpus
&= (~0x4700);
3958 void helper_fptan(void)
3960 CPU86_LDouble fptemp
;
3963 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3969 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3970 /* the above code is for |arg| < 2**52 only */
3974 void helper_fpatan(void)
3976 CPU86_LDouble fptemp
, fpsrcop
;
3980 ST1
= atan2(fpsrcop
,fptemp
);
3984 void helper_fxtract(void)
3986 CPU86_LDoubleU temp
;
3987 unsigned int expdif
;
3990 expdif
= EXPD(temp
) - EXPBIAS
;
3991 /*DP exponent bias*/
3998 void helper_fprem1(void)
4000 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4001 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4003 signed long long int q
;
4005 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4006 ST0
= 0.0 / 0.0; /* NaN */
4007 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4013 fpsrcop1
.d
= fpsrcop
;
4015 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4018 /* optimisation? taken from the AMD docs */
4019 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4020 /* ST0 is unchanged */
4025 dblq
= fpsrcop
/ fptemp
;
4026 /* round dblq towards nearest integer */
4028 ST0
= fpsrcop
- fptemp
* dblq
;
4030 /* convert dblq to q by truncating towards zero */
4032 q
= (signed long long int)(-dblq
);
4034 q
= (signed long long int)dblq
;
4036 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4037 /* (C0,C3,C1) <-- (q2,q1,q0) */
4038 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4039 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4040 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4042 env
->fpus
|= 0x400; /* C2 <-- 1 */
4043 fptemp
= pow(2.0, expdif
- 50);
4044 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4045 /* fpsrcop = integer obtained by chopping */
4046 fpsrcop
= (fpsrcop
< 0.0) ?
4047 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4048 ST0
-= (ST1
* fpsrcop
* fptemp
);
4052 void helper_fprem(void)
4054 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4055 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4057 signed long long int q
;
4059 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4060 ST0
= 0.0 / 0.0; /* NaN */
4061 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4065 fpsrcop
= (CPU86_LDouble
)ST0
;
4066 fptemp
= (CPU86_LDouble
)ST1
;
4067 fpsrcop1
.d
= fpsrcop
;
4069 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4072 /* optimisation? taken from the AMD docs */
4073 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4074 /* ST0 is unchanged */
4078 if ( expdif
< 53 ) {
4079 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4080 /* round dblq towards zero */
4081 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4082 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4084 /* convert dblq to q by truncating towards zero */
4086 q
= (signed long long int)(-dblq
);
4088 q
= (signed long long int)dblq
;
4090 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4091 /* (C0,C3,C1) <-- (q2,q1,q0) */
4092 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4093 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4094 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4096 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4097 env
->fpus
|= 0x400; /* C2 <-- 1 */
4098 fptemp
= pow(2.0, (double)(expdif
- N
));
4099 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4100 /* fpsrcop = integer obtained by chopping */
4101 fpsrcop
= (fpsrcop
< 0.0) ?
4102 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4103 ST0
-= (ST1
* fpsrcop
* fptemp
);
4107 void helper_fyl2xp1(void)
4109 CPU86_LDouble fptemp
;
4112 if ((fptemp
+1.0)>0.0) {
4113 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4117 env
->fpus
&= (~0x4700);
4122 void helper_fsqrt(void)
4124 CPU86_LDouble fptemp
;
4128 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4134 void helper_fsincos(void)
4136 CPU86_LDouble fptemp
;
4139 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4145 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4146 /* the above code is for |arg| < 2**63 only */
4150 void helper_frndint(void)
4152 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4155 void helper_fscale(void)
4157 ST0
= ldexp (ST0
, (int)(ST1
));
4160 void helper_fsin(void)
4162 CPU86_LDouble fptemp
;
4165 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4169 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4170 /* the above code is for |arg| < 2**53 only */
4174 void helper_fcos(void)
4176 CPU86_LDouble fptemp
;
4179 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4183 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4184 /* the above code is for |arg5 < 2**63 only */
4188 void helper_fxam_ST0(void)
4190 CPU86_LDoubleU temp
;
4195 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4197 env
->fpus
|= 0x200; /* C1 <-- 1 */
4199 /* XXX: test fptags too */
4200 expdif
= EXPD(temp
);
4201 if (expdif
== MAXEXPD
) {
4202 #ifdef USE_X86LDOUBLE
4203 if (MANTD(temp
) == 0x8000000000000000ULL
)
4205 if (MANTD(temp
) == 0)
4207 env
->fpus
|= 0x500 /*Infinity*/;
4209 env
->fpus
|= 0x100 /*NaN*/;
4210 } else if (expdif
== 0) {
4211 if (MANTD(temp
) == 0)
4212 env
->fpus
|= 0x4000 /*Zero*/;
4214 env
->fpus
|= 0x4400 /*Denormal*/;
4220 void helper_fstenv(target_ulong ptr
, int data32
)
4222 int fpus
, fptag
, exp
, i
;
4226 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4228 for (i
=7; i
>=0; i
--) {
4230 if (env
->fptags
[i
]) {
4233 tmp
.d
= env
->fpregs
[i
].d
;
4236 if (exp
== 0 && mant
== 0) {
4239 } else if (exp
== 0 || exp
== MAXEXPD
4240 #ifdef USE_X86LDOUBLE
4241 || (mant
& (1LL << 63)) == 0
4244 /* NaNs, infinity, denormal */
4251 stl(ptr
, env
->fpuc
);
4253 stl(ptr
+ 8, fptag
);
4254 stl(ptr
+ 12, 0); /* fpip */
4255 stl(ptr
+ 16, 0); /* fpcs */
4256 stl(ptr
+ 20, 0); /* fpoo */
4257 stl(ptr
+ 24, 0); /* fpos */
4260 stw(ptr
, env
->fpuc
);
4262 stw(ptr
+ 4, fptag
);
4270 void helper_fldenv(target_ulong ptr
, int data32
)
4275 env
->fpuc
= lduw(ptr
);
4276 fpus
= lduw(ptr
+ 4);
4277 fptag
= lduw(ptr
+ 8);
4280 env
->fpuc
= lduw(ptr
);
4281 fpus
= lduw(ptr
+ 2);
4282 fptag
= lduw(ptr
+ 4);
4284 env
->fpstt
= (fpus
>> 11) & 7;
4285 env
->fpus
= fpus
& ~0x3800;
4286 for(i
= 0;i
< 8; i
++) {
4287 env
->fptags
[i
] = ((fptag
& 3) == 3);
4292 void helper_fsave(target_ulong ptr
, int data32
)
4297 helper_fstenv(ptr
, data32
);
4299 ptr
+= (14 << data32
);
4300 for(i
= 0;i
< 8; i
++) {
4302 helper_fstt(tmp
, ptr
);
4320 void helper_frstor(target_ulong ptr
, int data32
)
4325 helper_fldenv(ptr
, data32
);
4326 ptr
+= (14 << data32
);
4328 for(i
= 0;i
< 8; i
++) {
4329 tmp
= helper_fldt(ptr
);
4335 void helper_fxsave(target_ulong ptr
, int data64
)
4337 int fpus
, fptag
, i
, nb_xmm_regs
;
4341 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4343 for(i
= 0; i
< 8; i
++) {
4344 fptag
|= (env
->fptags
[i
] << i
);
4346 stw(ptr
, env
->fpuc
);
4348 stw(ptr
+ 4, fptag
^ 0xff);
4349 #ifdef TARGET_X86_64
4351 stq(ptr
+ 0x08, 0); /* rip */
4352 stq(ptr
+ 0x10, 0); /* rdp */
4356 stl(ptr
+ 0x08, 0); /* eip */
4357 stl(ptr
+ 0x0c, 0); /* sel */
4358 stl(ptr
+ 0x10, 0); /* dp */
4359 stl(ptr
+ 0x14, 0); /* sel */
4363 for(i
= 0;i
< 8; i
++) {
4365 helper_fstt(tmp
, addr
);
4369 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4370 /* XXX: finish it */
4371 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4372 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4373 if (env
->hflags
& HF_CS64_MASK
)
4378 /* Fast FXSAVE leaves out the XMM registers */
4379 if (!(env
->efer
& MSR_EFER_FFXSR
)
4380 || (env
->hflags
& HF_CPL_MASK
)
4381 || !(env
->hflags
& HF_LMA_MASK
)) {
4382 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4383 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4384 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4391 void helper_fxrstor(target_ulong ptr
, int data64
)
4393 int i
, fpus
, fptag
, nb_xmm_regs
;
4397 env
->fpuc
= lduw(ptr
);
4398 fpus
= lduw(ptr
+ 2);
4399 fptag
= lduw(ptr
+ 4);
4400 env
->fpstt
= (fpus
>> 11) & 7;
4401 env
->fpus
= fpus
& ~0x3800;
4403 for(i
= 0;i
< 8; i
++) {
4404 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4408 for(i
= 0;i
< 8; i
++) {
4409 tmp
= helper_fldt(addr
);
4414 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4415 /* XXX: finish it */
4416 env
->mxcsr
= ldl(ptr
+ 0x18);
4418 if (env
->hflags
& HF_CS64_MASK
)
4423 /* Fast FXRESTORE leaves out the XMM registers */
4424 if (!(env
->efer
& MSR_EFER_FFXSR
)
4425 || (env
->hflags
& HF_CPL_MASK
)
4426 || !(env
->hflags
& HF_LMA_MASK
)) {
4427 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4428 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4429 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4436 #ifndef USE_X86LDOUBLE
4438 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4440 CPU86_LDoubleU temp
;
4445 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4446 /* exponent + sign */
4447 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4448 e
|= SIGND(temp
) >> 16;
4452 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4454 CPU86_LDoubleU temp
;
4458 /* XXX: handle overflow ? */
4459 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4460 e
|= (upper
>> 4) & 0x800; /* sign */
4461 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4463 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4466 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4473 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4475 CPU86_LDoubleU temp
;
4478 *pmant
= temp
.l
.lower
;
4479 *pexp
= temp
.l
.upper
;
4482 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4484 CPU86_LDoubleU temp
;
4486 temp
.l
.upper
= upper
;
4487 temp
.l
.lower
= mant
;
4492 #ifdef TARGET_X86_64
4494 //#define DEBUG_MULDIV
4496 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4505 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4509 add128(plow
, phigh
, 1, 0);
4512 /* return TRUE if overflow */
4513 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4515 uint64_t q
, r
, a1
, a0
;
4528 /* XXX: use a better algorithm */
4529 for(i
= 0; i
< 64; i
++) {
4531 a1
= (a1
<< 1) | (a0
>> 63);
4532 if (ab
|| a1
>= b
) {
4538 a0
= (a0
<< 1) | qb
;
4540 #if defined(DEBUG_MULDIV)
4541 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4542 *phigh
, *plow
, b
, a0
, a1
);
4550 /* return TRUE if overflow */
4551 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4554 sa
= ((int64_t)*phigh
< 0);
4556 neg128(plow
, phigh
);
4560 if (div64(plow
, phigh
, b
) != 0)
4563 if (*plow
> (1ULL << 63))
4567 if (*plow
>= (1ULL << 63))
4575 void helper_mulq_EAX_T0(target_ulong t0
)
4579 mulu64(&r0
, &r1
, EAX
, t0
);
4586 void helper_imulq_EAX_T0(target_ulong t0
)
4590 muls64(&r0
, &r1
, EAX
, t0
);
4594 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4597 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4601 muls64(&r0
, &r1
, t0
, t1
);
4603 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4607 void helper_divq_EAX(target_ulong t0
)
4611 raise_exception(EXCP00_DIVZ
);
4615 if (div64(&r0
, &r1
, t0
))
4616 raise_exception(EXCP00_DIVZ
);
4621 void helper_idivq_EAX(target_ulong t0
)
4625 raise_exception(EXCP00_DIVZ
);
4629 if (idiv64(&r0
, &r1
, t0
))
4630 raise_exception(EXCP00_DIVZ
);
4636 static void do_hlt(void)
4638 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4640 env
->exception_index
= EXCP_HLT
;
4644 void helper_hlt(int next_eip_addend
)
4646 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4647 EIP
+= next_eip_addend
;
4652 void helper_monitor(target_ulong ptr
)
4654 if ((uint32_t)ECX
!= 0)
4655 raise_exception(EXCP0D_GPF
);
4656 /* XXX: store address ? */
4657 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4660 void helper_mwait(int next_eip_addend
)
4662 if ((uint32_t)ECX
!= 0)
4663 raise_exception(EXCP0D_GPF
);
4664 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4665 EIP
+= next_eip_addend
;
4667 /* XXX: not complete but not completely erroneous */
4668 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4669 /* more than one CPU: do not sleep because another CPU may
4676 void helper_debug(void)
4678 env
->exception_index
= EXCP_DEBUG
;
4682 void helper_reset_rf(void)
4684 env
->eflags
&= ~RF_MASK
;
4687 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4689 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4692 void helper_raise_exception(int exception_index
)
4694 raise_exception(exception_index
);
4697 void helper_cli(void)
4699 env
->eflags
&= ~IF_MASK
;
4702 void helper_sti(void)
4704 env
->eflags
|= IF_MASK
;
4708 /* vm86plus instructions */
4709 void helper_cli_vm(void)
4711 env
->eflags
&= ~VIF_MASK
;
4714 void helper_sti_vm(void)
4716 env
->eflags
|= VIF_MASK
;
4717 if (env
->eflags
& VIP_MASK
) {
4718 raise_exception(EXCP0D_GPF
);
4723 void helper_set_inhibit_irq(void)
4725 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4728 void helper_reset_inhibit_irq(void)
4730 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4733 void helper_boundw(target_ulong a0
, int v
)
4737 high
= ldsw(a0
+ 2);
4739 if (v
< low
|| v
> high
) {
4740 raise_exception(EXCP05_BOUND
);
4744 void helper_boundl(target_ulong a0
, int v
)
4749 if (v
< low
|| v
> high
) {
4750 raise_exception(EXCP05_BOUND
);
4754 static float approx_rsqrt(float a
)
4756 return 1.0 / sqrt(a
);
4759 static float approx_rcp(float a
)
4764 #if !defined(CONFIG_USER_ONLY)
4766 #define MMUSUFFIX _mmu
4769 #include "softmmu_template.h"
4772 #include "softmmu_template.h"
4775 #include "softmmu_template.h"
4778 #include "softmmu_template.h"
4782 #if !defined(CONFIG_USER_ONLY)
4783 /* try to fill the TLB and return an exception if error. If retaddr is
4784 NULL, it means that the function was called in C code (i.e. not
4785 from generated code or from helper.c) */
4786 /* XXX: fix it to restore all registers */
4787 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4789 TranslationBlock
*tb
;
4792 CPUX86State
*saved_env
;
4794 /* XXX: hack to restore env in all cases, even if not called from
4797 env
= cpu_single_env
;
4799 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4802 /* now we have a real cpu fault */
4803 pc
= (unsigned long)retaddr
;
4804 tb
= tb_find_pc(pc
);
4806 /* the PC is inside the translated code. It means that we have
4807 a virtual CPU fault */
4808 cpu_restore_state(tb
, env
, pc
, NULL
);
4811 raise_exception_err(env
->exception_index
, env
->error_code
);
4817 /* Secure Virtual Machine helpers */
4819 #if defined(CONFIG_USER_ONLY)
4821 void helper_vmrun(int aflag
, int next_eip_addend
)
4824 void helper_vmmcall(void)
4827 void helper_vmload(int aflag
)
4830 void helper_vmsave(int aflag
)
4833 void helper_stgi(void)
4836 void helper_clgi(void)
4839 void helper_skinit(void)
4842 void helper_invlpga(int aflag
)
4845 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4848 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4852 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4853 uint32_t next_eip_addend
)
4858 static inline void svm_save_seg(target_phys_addr_t addr
,
4859 const SegmentCache
*sc
)
4861 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4863 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4865 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4867 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4868 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
4871 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4875 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4876 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4877 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4878 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4879 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4882 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4883 CPUState
*env
, int seg_reg
)
4885 SegmentCache sc1
, *sc
= &sc1
;
4886 svm_load_seg(addr
, sc
);
4887 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4888 sc
->base
, sc
->limit
, sc
->flags
);
4891 void helper_vmrun(int aflag
, int next_eip_addend
)
4897 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4902 addr
= (uint32_t)EAX
;
4904 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4906 env
->vm_vmcb
= addr
;
4908 /* save the current CPU state in the hsave page */
4909 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4910 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4912 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4913 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4915 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4916 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4917 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4918 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4919 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4920 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4922 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4923 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4925 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4927 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4929 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4931 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4934 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4935 EIP
+ next_eip_addend
);
4936 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4937 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4939 /* load the interception bitmaps so we do not need to access the
4941 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4942 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4943 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4944 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4945 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4946 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4948 /* enable intercepts */
4949 env
->hflags
|= HF_SVMI_MASK
;
4951 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4953 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4954 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4956 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4957 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4959 /* clear exit_info_2 so we behave like the real hardware */
4960 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4962 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4963 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4964 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4965 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4966 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4967 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4968 if (int_ctl
& V_INTR_MASKING_MASK
) {
4969 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4970 env
->hflags2
|= HF2_VINTR_MASK
;
4971 if (env
->eflags
& IF_MASK
)
4972 env
->hflags2
|= HF2_HIF_MASK
;
4976 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4978 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4979 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4980 CC_OP
= CC_OP_EFLAGS
;
4982 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4984 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4986 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4988 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4991 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4993 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4994 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4995 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4996 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4997 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4999 /* FIXME: guest state consistency checks */
5001 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
5002 case TLB_CONTROL_DO_NOTHING
:
5004 case TLB_CONTROL_FLUSH_ALL_ASID
:
5005 /* FIXME: this is not 100% correct but should work for now */
5010 env
->hflags2
|= HF2_GIF_MASK
;
5012 if (int_ctl
& V_IRQ_MASK
) {
5013 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
5016 /* maybe we need to inject an event */
5017 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
5018 if (event_inj
& SVM_EVTINJ_VALID
) {
5019 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
5020 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
5021 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
5023 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
5024 /* FIXME: need to implement valid_err */
5025 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5026 case SVM_EVTINJ_TYPE_INTR
:
5027 env
->exception_index
= vector
;
5028 env
->error_code
= event_inj_err
;
5029 env
->exception_is_int
= 0;
5030 env
->exception_next_eip
= -1;
5031 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5032 /* XXX: is it always correct ? */
5033 do_interrupt(vector
, 0, 0, 0, 1);
5035 case SVM_EVTINJ_TYPE_NMI
:
5036 env
->exception_index
= EXCP02_NMI
;
5037 env
->error_code
= event_inj_err
;
5038 env
->exception_is_int
= 0;
5039 env
->exception_next_eip
= EIP
;
5040 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5043 case SVM_EVTINJ_TYPE_EXEPT
:
5044 env
->exception_index
= vector
;
5045 env
->error_code
= event_inj_err
;
5046 env
->exception_is_int
= 0;
5047 env
->exception_next_eip
= -1;
5048 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5051 case SVM_EVTINJ_TYPE_SOFT
:
5052 env
->exception_index
= vector
;
5053 env
->error_code
= event_inj_err
;
5054 env
->exception_is_int
= 1;
5055 env
->exception_next_eip
= EIP
;
5056 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5060 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5064 void helper_vmmcall(void)
5066 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5067 raise_exception(EXCP06_ILLOP
);
5070 void helper_vmload(int aflag
)
5073 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
5078 addr
= (uint32_t)EAX
;
5080 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5081 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5082 env
->segs
[R_FS
].base
);
5084 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
5086 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
5088 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5090 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5093 #ifdef TARGET_X86_64
5094 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5095 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5096 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5097 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5099 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5100 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5101 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5102 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
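/* STGI/CLGI set and clear the global interrupt flag, which this
   implementation keeps in the HF2_GIF_MASK bit of env->hflags2. */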
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
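/* Central intercept check, used by the translated code and the other
   helpers: CR/DR accesses and exceptions are matched against their
   per-class intercept bitmaps, MSR accesses consult the MSR permission
   map in guest memory, and everything else uses the general intercept
   bit vector.  A matching intercept triggers helper_vmexit(). */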
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
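/* I/O intercepts are decided by the I/O permission map referenced from
   the VMCB: one bit per port, with the size field of 'param' widened
   into a mask so that multi-byte accesses test every covered port. */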
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
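/* #VMEXIT: write the guest state and the exit code/info back into the
   VMCB, reload the host state saved by VMRUN from vm_hsave, and return
   to the main loop so execution resumes in the host context. */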
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                  EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
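/* The SSE/MMX operation bodies and the generic ALU flag helpers are
   generated by including the template headers once per operand width,
   selected through the SHIFT macro. */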
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}
target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
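/* Lazy condition code evaluation: CC_OP records which operation last
   updated the flags; these dispatchers recompute either the full set of
   EFLAGS condition bits or just CF from the saved operands. */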
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();