/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
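/* PF is computed from the low byte of a result: the flag is set when that
   byte contains an even number of 1 bits.  parity_table maps every byte
   value directly to CC_P or 0, so translated code can look the flag up
   instead of recomputing it. */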
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
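/* RCL/RCR on a 16-bit operand rotate through CF as well, i.e. over 17 bits,
   so the rotation count is reduced modulo 17; rclw_table gives count % 17
   for every possible 5-bit shift count. */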
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
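/* Same idea for 8-bit RCL/RCR: 8 data bits plus CF form a 9-bit rotation,
   so the count is reduced modulo 9. */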
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
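/* FPU constants: lg2 = log10(2), l2e = log2(e), l2t = log2(10).  These are
   presumably the values delivered by the FLDLG2, FLDL2E and FLDL2T
   constant-load instructions. */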
#if defined(CONFIG_SOFTFLOAT)
# define floatx_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
# define floatx_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
# define floatx_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
#else
# define floatx_lg2 (0.30102999566398119523L)
# define floatx_l2e (1.44269504088896340739L)
# define floatx_l2t (3.32192809488736234781L)
#endif

static const CPU86_LDouble f15rk[7] =
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
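/* A descriptor is read as two 32-bit words, e1 (bytes 0-3) and e2 (bytes 4-7).
   The limit is split between e1 bits 0-15 and e2 bits 16-19 and is scaled to
   4K granularity when the G bit is set; the base is split between e1 bits
   16-31 and e2 bits 0-7 and 24-31. */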
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
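/* When an interrupt or far call raises the privilege level, the new SS:ESP is
   fetched from the current task's TSS; the 16-bit and 32-bit TSS formats keep
   those fields at different offsets, hence the shift-based indexing below. */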
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
    int type, index, shift;

    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for(i = 0; i < env->tr.limit; i++) {
        printf("%02x ", env->tr.base[i]);
        if ((i & 7) == 7) printf("\n");
    }

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        cpu_abort(env, "invalid tss type");
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        old_tss_limit_max = 103;
        old_tss_limit_max = 43;
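    /* 103 and 43 are the minimum valid limits: a 32-bit TSS is 104 bytes and
       a 16-bit TSS is 44 bytes, so the limit must be at least size - 1. */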
    /* read all the registers from the new TSS */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);

        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);

    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);

    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);

        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    /* now if an exception occurs, it will occur in the next task context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);

    /* load all registers without an exception, then reload them with
       possible exception */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK |
            IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */

    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);

    env->ldt.selector = new_ldt & ~4;
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
    val = lduw_kernel(env->tr.base + io_offset);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
void helper_check_iob(uint32_t t0)

void helper_check_iow(uint32_t t0)

void helper_check_iol(uint32_t t0)

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
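/* Note that the PUSH/POP macros above only move a local sp copy; the caller
   publishes the final value with SET_ESP once the whole frame has been
   built, so a fault in the middle of an interrupt push sequence leaves the
   architectural ESP unchanged. */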
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);

    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            if (env->segs[R_SS].flags & DESC_B_MASK)
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
                stl_kernel(ssp, error_code);
                stw_kernel(ssp, error_code);
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)

        if (env->eflags & VM_MASK) {
            PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHL(ssp, esp, sp_mask, ESP);
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);

        if (env->eflags & VM_MASK) {
            PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHW(ssp, esp, sp_mask, ESP);
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);

        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);

    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
            esp = get_rsp_from_tss(ist + 3);
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            esp = get_rsp_from_tss(ist + 3);
        esp &= ~0xfLL; /* align stack */
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    if (has_error_code) {
        PUSHQ(esp, error_code);

        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
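/* SYSCALL/SYSRET take their selectors from the STAR MSR: bits 47:32 hold the
   kernel CS base (SS is that value + 8), bits 63:48 the user-mode base
   selector used by SYSRET.  In long mode the target RIP comes from LSTAR (or
   CSTAR for 32-bit callers) and the RFLAGS bits listed in FMASK are cleared. */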
void helper_syscall(int next_eip_addend)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
            env->eip = env->lstar;
            env->eip = env->cstar;

        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);

        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
    target_ulong ptr, ssp;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
    int dpl, cpl, shift;

    if (env->hflags & HF_LMA_MASK) {
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
            type = SVM_EVTINJ_TYPE_SOFT;
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            log_cpu_state(env, X86_DUMP_CCOP);

                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));

    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        do_interrupt_real(intno, is_int, error_code, next_eip);

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
    raise_exception(exception_index);
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)

void helper_rsm(void)

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
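/* On SMI entry the CPU state is saved in the SMRAM state-save area located
   at smbase + 0x8000; the offsets below index into that area.  The revision
   ID advertises the save-area format, with bit 17 presumably indicating
   SMBASE relocation support (helper_rsm only honours a new SMBASE when that
   bit is set in the saved image). */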
void do_smm_enter(void)
    target_ulong sm_state;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);

    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
            offset = 0x7f84 + i * 12;
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);

    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
void helper_rsm(void)
    target_ulong sm_state;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;

    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
            offset = 0x7f84 + i * 12;
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;

    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */
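/* Both a zero divisor and a quotient that does not fit in the destination
   register raise #DE (EXCP00_DIVZ), which is why the helpers below check the
   quotient range as well as the divisor. */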
void helper_divb_AL(target_ulong t0)
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;

void helper_idivb_AL(target_ulong t0)
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;

void helper_divw_AX(target_ulong t0)
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;

void helper_idivw_AX(target_ulong t0)
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
        raise_exception(EXCP00_DIVZ);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;

void helper_divl_EAX(target_ulong t0)
    unsigned int den, r;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);

void helper_idivl_EAX(target_ulong t0)
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
        raise_exception(EXCP00_DIVZ);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);

/* XXX: exception */
void helper_aam(int base)
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_aad(int base)
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
void helper_aaa(void)
    eflags = helper_cc_compute_all(CC_OP);
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
        eflags &= ~(CC_C | CC_A);
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_aas(void)
    eflags = helper_cc_compute_all(CC_OP);
    ah = (EAX >> 8) & 0xff;

    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
        eflags &= ~(CC_C | CC_A);
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_daa(void)
    eflags = helper_cc_compute_all(CC_OP);

    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */

void helper_das(void)
    int al, al1, af, cf;

    eflags = helper_cc_compute_all(CC_OP);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0xff;
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */

void helper_into(int next_eip_addend)
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);

void helper_cmpxchg8b(target_ulong a0)
    eflags = helper_cc_compute_all(CC_OP);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        /* always do the store */
        EDX = (uint32_t)(d >> 32);
>> 32);
1914 #ifdef TARGET_X86_64
1915 void helper_cmpxchg16b(target_ulong a0
)
1920 if ((a0
& 0xf) != 0)
1921 raise_exception(EXCP0D_GPF
);
1922 eflags
= helper_cc_compute_all(CC_OP
);
1925 if (d0
== EAX
&& d1
== EDX
) {
1930 /* always do the store */
1941 void helper_single_step(void)
1943 #ifndef CONFIG_USER_ONLY
1944 check_hw_breakpoints(env
, 1);
1945 env
->dr
[6] |= DR6_BS
;
1947 raise_exception(EXCP01_DB
);
1950 void helper_cpuid(void)
1952 uint32_t eax
, ebx
, ecx
, edx
;
1954 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1956 cpu_x86_cpuid(env
, (uint32_t)EAX
, (uint32_t)ECX
, &eax
, &ebx
, &ecx
, &edx
);
1963 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1966 uint32_t esp_mask
, esp
, ebp
;
1968 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1969 ssp
= env
->segs
[R_SS
].base
;
1978 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1981 stl(ssp
+ (esp
& esp_mask
), t1
);
1988 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1991 stw(ssp
+ (esp
& esp_mask
), t1
);
1995 #ifdef TARGET_X86_64
1996 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
1998 target_ulong esp
, ebp
;
2018 stw(esp
, lduw(ebp
));
void helper_lldt(int selector)
    int index, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
    env->ldt.selector = selector;

void helper_ltr(int selector)
    int index, type, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    env->tr.selector = selector;
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);

        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            stl_kernel(ptr + 4, e2);

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* non conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);

        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
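/* Illustrative sketch (guarded out of the build): the PUSHW/PUSHL macros used
 * above behave roughly like the helpers below -- the stack pointer is
 * decremented by the operand size and the value is stored at the SS base plus
 * the masked stack pointer, so 16-bit stacks wrap inside 64K.  This is an
 * approximation for readers; the authoritative definitions are the macros
 * earlier in this file, and the *_sketch names are hypothetical. */
#if 0
static inline void push16_sketch(target_ulong ssp, uint32_t *sp,
                                 uint32_t sp_mask, uint32_t val)
{
    *sp -= 2;
    stw_kernel(ssp + (*sp & sp_mask), val);
}

static inline void push32_sketch(target_ulong ssp, uint32_t *sp,
                                 uint32_t sp_mask, uint32_t val)
{
    *sp -= 4;
    stl_kernel(ssp + (*sp & sp_mask), val);
}
#endif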
2323 /* protected mode call */
2324 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2325 int shift
, int next_eip_addend
)
2328 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2329 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2330 uint32_t val
, limit
, old_sp_mask
;
2331 target_ulong ssp
, old_ssp
, next_eip
;
2333 next_eip
= env
->eip
+ next_eip_addend
;
2334 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2335 LOG_PCALL_STATE(env
);
2336 if ((new_cs
& 0xfffc) == 0)
2337 raise_exception_err(EXCP0D_GPF
, 0);
2338 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2339 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2340 cpl
= env
->hflags
& HF_CPL_MASK
;
2341 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2342 if (e2
& DESC_S_MASK
) {
2343 if (!(e2
& DESC_CS_MASK
))
2344 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2345 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2346 if (e2
& DESC_C_MASK
) {
2347 /* conforming code segment */
2349 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2351 /* non conforming code segment */
2354 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2356 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2358 if (!(e2
& DESC_P_MASK
))
2359 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2361 #ifdef TARGET_X86_64
2362 /* XXX: check 16/32 bit cases in long mode */
2367 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2368 PUSHQ(rsp
, next_eip
);
2369 /* from this point, not restartable */
2371 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2372 get_seg_base(e1
, e2
),
2373 get_seg_limit(e1
, e2
), e2
);
2379 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2380 ssp
= env
->segs
[R_SS
].base
;
2382 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2383 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2385 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2386 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2389 limit
= get_seg_limit(e1
, e2
);
2390 if (new_eip
> limit
)
2391 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2392 /* from this point, not restartable */
2393 SET_ESP(sp
, sp_mask
);
2394 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2395 get_seg_base(e1
, e2
), limit
, e2
);
2399 /* check gate type */
2400 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2401 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2404 case 1: /* available 286 TSS */
2405 case 9: /* available 386 TSS */
2406 case 5: /* task gate */
2407 if (dpl
< cpl
|| dpl
< rpl
)
2408 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2409 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2410 CC_OP
= CC_OP_EFLAGS
;
2412 case 4: /* 286 call gate */
2413 case 12: /* 386 call gate */
2416 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2421 if (dpl
< cpl
|| dpl
< rpl
)
2422 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2423 /* check valid bit */
2424 if (!(e2
& DESC_P_MASK
))
2425 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2426 selector
= e1
>> 16;
2427 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2428 param_count
= e2
& 0x1f;
2429 if ((selector
& 0xfffc) == 0)
2430 raise_exception_err(EXCP0D_GPF
, 0);
2432 if (load_segment(&e1
, &e2
, selector
) != 0)
2433 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2434 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2435 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2436 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2438 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2439 if (!(e2
& DESC_P_MASK
))
2440 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2442 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2443 /* to inner privilege */
2444 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2445 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2446 ss
, sp
, param_count
, ESP
);
2447 if ((ss
& 0xfffc) == 0)
2448 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2449 if ((ss
& 3) != dpl
)
2450 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2451 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2452 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2453 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2455 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2456 if (!(ss_e2
& DESC_S_MASK
) ||
2457 (ss_e2
& DESC_CS_MASK
) ||
2458 !(ss_e2
& DESC_W_MASK
))
2459 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2460 if (!(ss_e2
& DESC_P_MASK
))
2461 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2463 // push_size = ((param_count * 2) + 8) << shift;
2465 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2466 old_ssp
= env
->segs
[R_SS
].base
;
2468 sp_mask
= get_sp_mask(ss_e2
);
2469 ssp
= get_seg_base(ss_e1
, ss_e2
);
2471 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2472 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2473 for(i
= param_count
- 1; i
>= 0; i
--) {
2474 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2475 PUSHL(ssp
, sp
, sp_mask
, val
);
2478 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2479 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2480 for(i
= param_count
- 1; i
>= 0; i
--) {
2481 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2482 PUSHW(ssp
, sp
, sp_mask
, val
);
2487 /* to same privilege */
2489 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2490 ssp
= env
->segs
[R_SS
].base
;
2491 // push_size = (4 << shift);
2496 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2497 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2499 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2500 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2503 /* from this point, not restartable */
2506 ss
= (ss
& ~3) | dpl
;
2507 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2509 get_seg_limit(ss_e1
, ss_e2
),
2513 selector
= (selector
& ~3) | dpl
;
2514 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2515 get_seg_base(e1
, e2
),
2516 get_seg_limit(e1
, e2
),
2518 cpu_x86_set_cpl(env
, dpl
);
2519 SET_ESP(sp
, sp_mask
);
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
2582 /* protected mode iret */
2583 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2585 uint32_t new_cs
, new_eflags
, new_ss
;
2586 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2587 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2588 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2589 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2591 #ifdef TARGET_X86_64
2596 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2598 ssp
= env
->segs
[R_SS
].base
;
2599 new_eflags
= 0; /* avoid warning */
2600 #ifdef TARGET_X86_64
2606 POPQ(sp
, new_eflags
);
2612 POPL(ssp
, sp
, sp_mask
, new_eip
);
2613 POPL(ssp
, sp
, sp_mask
, new_cs
);
2616 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2617 if (new_eflags
& VM_MASK
)
2618 goto return_to_vm86
;
2622 POPW(ssp
, sp
, sp_mask
, new_eip
);
2623 POPW(ssp
, sp
, sp_mask
, new_cs
);
2625 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2627 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2628 new_cs
, new_eip
, shift
, addend
);
2629 LOG_PCALL_STATE(env
);
2630 if ((new_cs
& 0xfffc) == 0)
2631 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2632 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2633 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2634 if (!(e2
& DESC_S_MASK
) ||
2635 !(e2
& DESC_CS_MASK
))
2636 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2637 cpl
= env
->hflags
& HF_CPL_MASK
;
2640 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2641 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2642 if (e2
& DESC_C_MASK
) {
2644 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2647 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2649 if (!(e2
& DESC_P_MASK
))
2650 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2653 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2654 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2655 /* return to same privilege level */
2656 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2657 get_seg_base(e1
, e2
),
2658 get_seg_limit(e1
, e2
),
2661 /* return to different privilege level */
2662 #ifdef TARGET_X86_64
2671 POPL(ssp
, sp
, sp_mask
, new_esp
);
2672 POPL(ssp
, sp
, sp_mask
, new_ss
);
2676 POPW(ssp
, sp
, sp_mask
, new_esp
);
2677 POPW(ssp
, sp
, sp_mask
, new_ss
);
2679 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2681 if ((new_ss
& 0xfffc) == 0) {
2682 #ifdef TARGET_X86_64
2683 /* NULL ss is allowed in long mode if cpl != 3*/
2684 /* XXX: test CS64 ? */
2685 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2686 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2688 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2689 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2690 DESC_W_MASK
| DESC_A_MASK
);
2691 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2695 raise_exception_err(EXCP0D_GPF
, 0);
2698 if ((new_ss
& 3) != rpl
)
2699 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2700 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2701 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2702 if (!(ss_e2
& DESC_S_MASK
) ||
2703 (ss_e2
& DESC_CS_MASK
) ||
2704 !(ss_e2
& DESC_W_MASK
))
2705 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2706 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2708 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2709 if (!(ss_e2
& DESC_P_MASK
))
2710 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2711 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2712 get_seg_base(ss_e1
, ss_e2
),
2713 get_seg_limit(ss_e1
, ss_e2
),
2717 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2718 get_seg_base(e1
, e2
),
2719 get_seg_limit(e1
, e2
),
2721 cpu_x86_set_cpl(env
, rpl
);
2723 #ifdef TARGET_X86_64
2724 if (env
->hflags
& HF_CS64_MASK
)
2728 sp_mask
= get_sp_mask(ss_e2
);
2730 /* validate data segments */
2731 validate_seg(R_ES
, rpl
);
2732 validate_seg(R_DS
, rpl
);
2733 validate_seg(R_FS
, rpl
);
2734 validate_seg(R_GS
, rpl
);
2738 SET_ESP(sp
, sp_mask
);
2741 /* NOTE: 'cpl' is the _old_ CPL */
2742 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2744 eflags_mask
|= IOPL_MASK
;
2745 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2747 eflags_mask
|= IF_MASK
;
2749 eflags_mask
&= 0xffff;
2750 load_eflags(new_eflags
, eflags_mask
);
2755 POPL(ssp
, sp
, sp_mask
, new_esp
);
2756 POPL(ssp
, sp
, sp_mask
, new_ss
);
2757 POPL(ssp
, sp
, sp_mask
, new_es
);
2758 POPL(ssp
, sp
, sp_mask
, new_ds
);
2759 POPL(ssp
, sp
, sp_mask
, new_fs
);
2760 POPL(ssp
, sp
, sp_mask
, new_gs
);
2762 /* modify processor state */
2763 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2764 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2765 load_seg_vm(R_CS
, new_cs
& 0xffff);
2766 cpu_x86_set_cpl(env
, 3);
2767 load_seg_vm(R_SS
, new_ss
& 0xffff);
2768 load_seg_vm(R_ES
, new_es
& 0xffff);
2769 load_seg_vm(R_DS
, new_ds
& 0xffff);
2770 load_seg_vm(R_FS
, new_fs
& 0xffff);
2771 load_seg_vm(R_GS
, new_gs
& 0xffff);
2773 env
->eip
= new_eip
& 0xffff;
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3010 #if defined(CONFIG_USER_ONLY)
3011 void helper_wrmsr(void)
3015 void helper_rdmsr(void)
3019 void helper_wrmsr(void)
3023 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3025 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3027 switch((uint32_t)ECX
) {
3028 case MSR_IA32_SYSENTER_CS
:
3029 env
->sysenter_cs
= val
& 0xffff;
3031 case MSR_IA32_SYSENTER_ESP
:
3032 env
->sysenter_esp
= val
;
3034 case MSR_IA32_SYSENTER_EIP
:
3035 env
->sysenter_eip
= val
;
3037 case MSR_IA32_APICBASE
:
3038 cpu_set_apic_base(env
->apic_state
, val
);
3042 uint64_t update_mask
;
3044 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3045 update_mask
|= MSR_EFER_SCE
;
3046 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3047 update_mask
|= MSR_EFER_LME
;
3048 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3049 update_mask
|= MSR_EFER_FFXSR
;
3050 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3051 update_mask
|= MSR_EFER_NXE
;
3052 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3053 update_mask
|= MSR_EFER_SVME
;
3054 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3055 update_mask
|= MSR_EFER_FFXSR
;
3056 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3057 (val
& update_mask
));
3066 case MSR_VM_HSAVE_PA
:
3067 env
->vm_hsave
= val
;
3069 #ifdef TARGET_X86_64
3080 env
->segs
[R_FS
].base
= val
;
3083 env
->segs
[R_GS
].base
= val
;
3085 case MSR_KERNELGSBASE
:
3086 env
->kernelgsbase
= val
;
3089 case MSR_MTRRphysBase(0):
3090 case MSR_MTRRphysBase(1):
3091 case MSR_MTRRphysBase(2):
3092 case MSR_MTRRphysBase(3):
3093 case MSR_MTRRphysBase(4):
3094 case MSR_MTRRphysBase(5):
3095 case MSR_MTRRphysBase(6):
3096 case MSR_MTRRphysBase(7):
3097 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3099 case MSR_MTRRphysMask(0):
3100 case MSR_MTRRphysMask(1):
3101 case MSR_MTRRphysMask(2):
3102 case MSR_MTRRphysMask(3):
3103 case MSR_MTRRphysMask(4):
3104 case MSR_MTRRphysMask(5):
3105 case MSR_MTRRphysMask(6):
3106 case MSR_MTRRphysMask(7):
3107 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3109 case MSR_MTRRfix64K_00000
:
3110 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3112 case MSR_MTRRfix16K_80000
:
3113 case MSR_MTRRfix16K_A0000
:
3114 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3116 case MSR_MTRRfix4K_C0000
:
3117 case MSR_MTRRfix4K_C8000
:
3118 case MSR_MTRRfix4K_D0000
:
3119 case MSR_MTRRfix4K_D8000
:
3120 case MSR_MTRRfix4K_E0000
:
3121 case MSR_MTRRfix4K_E8000
:
3122 case MSR_MTRRfix4K_F0000
:
3123 case MSR_MTRRfix4K_F8000
:
3124 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3126 case MSR_MTRRdefType
:
3127 env
->mtrr_deftype
= val
;
3129 case MSR_MCG_STATUS
:
3130 env
->mcg_status
= val
;
3133 if ((env
->mcg_cap
& MCG_CTL_P
)
3134 && (val
== 0 || val
== ~(uint64_t)0))
3141 if ((uint32_t)ECX
>= MSR_MC0_CTL
3142 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3143 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3144 if ((offset
& 0x3) != 0
3145 || (val
== 0 || val
== ~(uint64_t)0))
3146 env
->mce_banks
[offset
] = val
;
3149 /* XXX: exception ? */
3154 void helper_rdmsr(void)
3158 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3160 switch((uint32_t)ECX
) {
3161 case MSR_IA32_SYSENTER_CS
:
3162 val
= env
->sysenter_cs
;
3164 case MSR_IA32_SYSENTER_ESP
:
3165 val
= env
->sysenter_esp
;
3167 case MSR_IA32_SYSENTER_EIP
:
3168 val
= env
->sysenter_eip
;
3170 case MSR_IA32_APICBASE
:
3171 val
= cpu_get_apic_base(env
->apic_state
);
3182 case MSR_VM_HSAVE_PA
:
3183 val
= env
->vm_hsave
;
3185 case MSR_IA32_PERF_STATUS
:
3186 /* tsc_increment_by_tick */
3188 /* CPU multiplier */
3189 val
|= (((uint64_t)4ULL) << 40);
3191 #ifdef TARGET_X86_64
3202 val
= env
->segs
[R_FS
].base
;
3205 val
= env
->segs
[R_GS
].base
;
3207 case MSR_KERNELGSBASE
:
3208 val
= env
->kernelgsbase
;
3214 case MSR_MTRRphysBase(0):
3215 case MSR_MTRRphysBase(1):
3216 case MSR_MTRRphysBase(2):
3217 case MSR_MTRRphysBase(3):
3218 case MSR_MTRRphysBase(4):
3219 case MSR_MTRRphysBase(5):
3220 case MSR_MTRRphysBase(6):
3221 case MSR_MTRRphysBase(7):
3222 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3224 case MSR_MTRRphysMask(0):
3225 case MSR_MTRRphysMask(1):
3226 case MSR_MTRRphysMask(2):
3227 case MSR_MTRRphysMask(3):
3228 case MSR_MTRRphysMask(4):
3229 case MSR_MTRRphysMask(5):
3230 case MSR_MTRRphysMask(6):
3231 case MSR_MTRRphysMask(7):
3232 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3234 case MSR_MTRRfix64K_00000
:
3235 val
= env
->mtrr_fixed
[0];
3237 case MSR_MTRRfix16K_80000
:
3238 case MSR_MTRRfix16K_A0000
:
3239 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3241 case MSR_MTRRfix4K_C0000
:
3242 case MSR_MTRRfix4K_C8000
:
3243 case MSR_MTRRfix4K_D0000
:
3244 case MSR_MTRRfix4K_D8000
:
3245 case MSR_MTRRfix4K_E0000
:
3246 case MSR_MTRRfix4K_E8000
:
3247 case MSR_MTRRfix4K_F0000
:
3248 case MSR_MTRRfix4K_F8000
:
3249 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3251 case MSR_MTRRdefType
:
3252 val
= env
->mtrr_deftype
;
3255 if (env
->cpuid_features
& CPUID_MTRR
)
3256 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3258 /* XXX: exception ? */
3265 if (env
->mcg_cap
& MCG_CTL_P
)
3270 case MSR_MCG_STATUS
:
3271 val
= env
->mcg_status
;
3274 if ((uint32_t)ECX
>= MSR_MC0_CTL
3275 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3276 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3277 val
= env
->mce_banks
[offset
];
3280 /* XXX: exception ? */
3284 EAX
= (uint32_t)(val
);
3285 EDX
= (uint32_t)(val
>> 32);
3289 target_ulong
helper_lsl(target_ulong selector1
)
3292 uint32_t e1
, e2
, eflags
, selector
;
3293 int rpl
, dpl
, cpl
, type
;
3295 selector
= selector1
& 0xffff;
3296 eflags
= helper_cc_compute_all(CC_OP
);
3297 if ((selector
& 0xfffc) == 0)
3299 if (load_segment(&e1
, &e2
, selector
) != 0)
3302 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3303 cpl
= env
->hflags
& HF_CPL_MASK
;
3304 if (e2
& DESC_S_MASK
) {
3305 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3308 if (dpl
< cpl
|| dpl
< rpl
)
3312 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3323 if (dpl
< cpl
|| dpl
< rpl
) {
3325 CC_SRC
= eflags
& ~CC_Z
;
3329 limit
= get_seg_limit(e1
, e2
);
3330 CC_SRC
= eflags
| CC_Z
;
3334 target_ulong
helper_lar(target_ulong selector1
)
3336 uint32_t e1
, e2
, eflags
, selector
;
3337 int rpl
, dpl
, cpl
, type
;
3339 selector
= selector1
& 0xffff;
3340 eflags
= helper_cc_compute_all(CC_OP
);
3341 if ((selector
& 0xfffc) == 0)
3343 if (load_segment(&e1
, &e2
, selector
) != 0)
3346 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3347 cpl
= env
->hflags
& HF_CPL_MASK
;
3348 if (e2
& DESC_S_MASK
) {
3349 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3352 if (dpl
< cpl
|| dpl
< rpl
)
3356 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3370 if (dpl
< cpl
|| dpl
< rpl
) {
3372 CC_SRC
= eflags
& ~CC_Z
;
3376 CC_SRC
= eflags
| CC_Z
;
3377 return e2
& 0x00f0ff00;
3380 void helper_verr(target_ulong selector1
)
3382 uint32_t e1
, e2
, eflags
, selector
;
3385 selector
= selector1
& 0xffff;
3386 eflags
= helper_cc_compute_all(CC_OP
);
3387 if ((selector
& 0xfffc) == 0)
3389 if (load_segment(&e1
, &e2
, selector
) != 0)
3391 if (!(e2
& DESC_S_MASK
))
3394 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3395 cpl
= env
->hflags
& HF_CPL_MASK
;
3396 if (e2
& DESC_CS_MASK
) {
3397 if (!(e2
& DESC_R_MASK
))
3399 if (!(e2
& DESC_C_MASK
)) {
3400 if (dpl
< cpl
|| dpl
< rpl
)
3404 if (dpl
< cpl
|| dpl
< rpl
) {
3406 CC_SRC
= eflags
& ~CC_Z
;
3410 CC_SRC
= eflags
| CC_Z
;
3413 void helper_verw(target_ulong selector1
)
3415 uint32_t e1
, e2
, eflags
, selector
;
3418 selector
= selector1
& 0xffff;
3419 eflags
= helper_cc_compute_all(CC_OP
);
3420 if ((selector
& 0xfffc) == 0)
3422 if (load_segment(&e1
, &e2
, selector
) != 0)
3424 if (!(e2
& DESC_S_MASK
))
3427 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3428 cpl
= env
->hflags
& HF_CPL_MASK
;
3429 if (e2
& DESC_CS_MASK
) {
3432 if (dpl
< cpl
|| dpl
< rpl
)
3434 if (!(e2
& DESC_W_MASK
)) {
3436 CC_SRC
= eflags
& ~CC_Z
;
3440 CC_SRC
= eflags
| CC_Z
;
3443 /* x87 FPU helpers */
3445 static inline double CPU86_LDouble_to_double(CPU86_LDouble a
)
3452 u
.f64
= floatx_to_float64(a
, &env
->fp_status
);
3456 static inline CPU86_LDouble
double_to_CPU86_LDouble(double a
)
3464 return float64_to_floatx(u
.f64
, &env
->fp_status
);
3467 static void fpu_set_exception(int mask
)
3470 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3471 env
->fpus
|= FPUS_SE
| FPUS_B
;
3474 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3476 if (floatx_is_zero(b
)) {
3477 fpu_set_exception(FPUS_ZE
);
3479 return floatx_div(a
, b
, &env
->fp_status
);
3482 static void fpu_raise_exception(void)
3484 if (env
->cr
[0] & CR0_NE_MASK
) {
3485 raise_exception(EXCP10_COPR
);
3487 #if !defined(CONFIG_USER_ONLY)
3494 void helper_flds_FT0(uint32_t val
)
3501 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3504 void helper_fldl_FT0(uint64_t val
)
3511 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3514 void helper_fildl_FT0(int32_t val
)
3516 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3519 void helper_flds_ST0(uint32_t val
)
3526 new_fpstt
= (env
->fpstt
- 1) & 7;
3528 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3529 env
->fpstt
= new_fpstt
;
3530 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3533 void helper_fldl_ST0(uint64_t val
)
3540 new_fpstt
= (env
->fpstt
- 1) & 7;
3542 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3543 env
->fpstt
= new_fpstt
;
3544 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3547 void helper_fildl_ST0(int32_t val
)
3550 new_fpstt
= (env
->fpstt
- 1) & 7;
3551 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3552 env
->fpstt
= new_fpstt
;
3553 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3556 void helper_fildll_ST0(int64_t val
)
3559 new_fpstt
= (env
->fpstt
- 1) & 7;
3560 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3561 env
->fpstt
= new_fpstt
;
3562 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3565 uint32_t helper_fsts_ST0(void)
3571 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3575 uint64_t helper_fstl_ST0(void)
3581 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3585 int32_t helper_fist_ST0(void)
3588 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3589 if (val
!= (int16_t)val
)
3594 int32_t helper_fistl_ST0(void)
3597 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3601 int64_t helper_fistll_ST0(void)
3604 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3608 int32_t helper_fistt_ST0(void)
3611 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3612 if (val
!= (int16_t)val
)
3617 int32_t helper_fisttl_ST0(void)
3620 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3624 int64_t helper_fisttll_ST0(void)
3627 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3631 void helper_fldt_ST0(target_ulong ptr
)
3634 new_fpstt
= (env
->fpstt
- 1) & 7;
3635 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3636 env
->fpstt
= new_fpstt
;
3637 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3640 void helper_fstt_ST0(target_ulong ptr
)
3642 helper_fstt(ST0
, ptr
);
3645 void helper_fpush(void)
3650 void helper_fpop(void)
3655 void helper_fdecstp(void)
3657 env
->fpstt
= (env
->fpstt
- 1) & 7;
3658 env
->fpus
&= (~0x4700);
3661 void helper_fincstp(void)
3663 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3664 env
->fpus
&= (~0x4700);
3669 void helper_ffree_STN(int st_index
)
3671 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3674 void helper_fmov_ST0_FT0(void)
3679 void helper_fmov_FT0_STN(int st_index
)
3684 void helper_fmov_ST0_STN(int st_index
)
3689 void helper_fmov_STN_ST0(int st_index
)
3694 void helper_fxchg_ST0_STN(int st_index
)
3702 /* FPU operations */
3704 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3706 void helper_fcom_ST0_FT0(void)
3710 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3711 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3714 void helper_fucom_ST0_FT0(void)
3718 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3719 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3722 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3724 void helper_fcomi_ST0_FT0(void)
3729 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3730 eflags
= helper_cc_compute_all(CC_OP
);
3731 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3735 void helper_fucomi_ST0_FT0(void)
3740 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3741 eflags
= helper_cc_compute_all(CC_OP
);
3742 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3746 void helper_fadd_ST0_FT0(void)
3748 ST0
= floatx_add(ST0
, FT0
, &env
->fp_status
);
3751 void helper_fmul_ST0_FT0(void)
3753 ST0
= floatx_mul(ST0
, FT0
, &env
->fp_status
);
3756 void helper_fsub_ST0_FT0(void)
3758 ST0
= floatx_sub(ST0
, FT0
, &env
->fp_status
);
3761 void helper_fsubr_ST0_FT0(void)
3763 ST0
= floatx_sub(FT0
, ST0
, &env
->fp_status
);
3766 void helper_fdiv_ST0_FT0(void)
3768 ST0
= helper_fdiv(ST0
, FT0
);
3771 void helper_fdivr_ST0_FT0(void)
3773 ST0
= helper_fdiv(FT0
, ST0
);
3776 /* fp operations between STN and ST0 */
3778 void helper_fadd_STN_ST0(int st_index
)
3780 ST(st_index
) = floatx_add(ST(st_index
), ST0
, &env
->fp_status
);
3783 void helper_fmul_STN_ST0(int st_index
)
3785 ST(st_index
) = floatx_mul(ST(st_index
), ST0
, &env
->fp_status
);
3788 void helper_fsub_STN_ST0(int st_index
)
3790 ST(st_index
) = floatx_sub(ST(st_index
), ST0
, &env
->fp_status
);
3793 void helper_fsubr_STN_ST0(int st_index
)
3795 ST(st_index
) = floatx_sub(ST0
, ST(st_index
), &env
->fp_status
);
3798 void helper_fdiv_STN_ST0(int st_index
)
3802 *p
= helper_fdiv(*p
, ST0
);
3805 void helper_fdivr_STN_ST0(int st_index
)
3809 *p
= helper_fdiv(ST0
, *p
);
3812 /* misc FPU operations */
3813 void helper_fchs_ST0(void)
3815 ST0
= floatx_chs(ST0
);
3818 void helper_fabs_ST0(void)
3820 ST0
= floatx_abs(ST0
);
3823 void helper_fld1_ST0(void)
3828 void helper_fldl2t_ST0(void)
3833 void helper_fldl2e_ST0(void)
3838 void helper_fldpi_ST0(void)
3843 void helper_fldlg2_ST0(void)
3848 void helper_fldln2_ST0(void)
3853 void helper_fldz_ST0(void)
3858 void helper_fldz_FT0(void)
3863 uint32_t helper_fnstsw(void)
3865 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3868 uint32_t helper_fnstcw(void)
3873 static void update_fp_status(void)
3877 /* set rounding mode */
3878 switch(env
->fpuc
& RC_MASK
) {
3881 rnd_type
= float_round_nearest_even
;
3884 rnd_type
= float_round_down
;
3887 rnd_type
= float_round_up
;
3890 rnd_type
= float_round_to_zero
;
3893 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3895 switch((env
->fpuc
>> 8) & 3) {
3907 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3911 void helper_fldcw(uint32_t val
)
3917 void helper_fclex(void)
3919 env
->fpus
&= 0x7f00;
3922 void helper_fwait(void)
3924 if (env
->fpus
& FPUS_SE
)
3925 fpu_raise_exception();
3928 void helper_fninit(void)
3945 void helper_fbld_ST0(target_ulong ptr
)
3953 for(i
= 8; i
>= 0; i
--) {
3955 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3957 tmp
= int64_to_floatx(val
, &env
->fp_status
);
3958 if (ldub(ptr
+ 9) & 0x80) {
3965 void helper_fbst_ST0(target_ulong ptr
)
3968 target_ulong mem_ref
, mem_end
;
3971 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3973 mem_end
= mem_ref
+ 9;
3980 while (mem_ref
< mem_end
) {
3985 v
= ((v
/ 10) << 4) | (v
% 10);
3988 while (mem_ref
< mem_end
) {
3993 void helper_f2xm1(void)
3995 double val
= CPU86_LDouble_to_double(ST0
);
3996 val
= pow(2.0, val
) - 1.0;
3997 ST0
= double_to_CPU86_LDouble(val
);
4000 void helper_fyl2x(void)
4002 double fptemp
= CPU86_LDouble_to_double(ST0
);
4005 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
4006 fptemp
*= CPU86_LDouble_to_double(ST1
);
4007 ST1
= double_to_CPU86_LDouble(fptemp
);
4010 env
->fpus
&= (~0x4700);
4015 void helper_fptan(void)
4017 double fptemp
= CPU86_LDouble_to_double(ST0
);
4019 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4022 fptemp
= tan(fptemp
);
4023 ST0
= double_to_CPU86_LDouble(fptemp
);
4026 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4027 /* the above code is for |arg| < 2**52 only */
4031 void helper_fpatan(void)
4033 double fptemp
, fpsrcop
;
4035 fpsrcop
= CPU86_LDouble_to_double(ST1
);
4036 fptemp
= CPU86_LDouble_to_double(ST0
);
4037 ST1
= double_to_CPU86_LDouble(atan2(fpsrcop
, fptemp
));
4041 void helper_fxtract(void)
4043 CPU86_LDoubleU temp
;
4047 if (floatx_is_zero(ST0
)) {
4048 /* Easy way to generate -inf and raising division by 0 exception */
4049 ST0
= floatx_div(floatx_chs(floatx_one
), floatx_zero
, &env
->fp_status
);
4055 expdif
= EXPD(temp
) - EXPBIAS
;
4056 /*DP exponent bias*/
4057 ST0
= int32_to_floatx(expdif
, &env
->fp_status
);
4064 void helper_fprem1(void)
4066 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4067 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4069 signed long long int q
;
4071 st0
= CPU86_LDouble_to_double(ST0
);
4072 st1
= CPU86_LDouble_to_double(ST1
);
4074 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4075 ST0
= double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */
4076 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4084 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4087 /* optimisation? taken from the AMD docs */
4088 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4089 /* ST0 is unchanged */
4094 dblq
= fpsrcop
/ fptemp
;
4095 /* round dblq towards nearest integer */
4097 st0
= fpsrcop
- fptemp
* dblq
;
4099 /* convert dblq to q by truncating towards zero */
4101 q
= (signed long long int)(-dblq
);
4103 q
= (signed long long int)dblq
;
4105 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4106 /* (C0,C3,C1) <-- (q2,q1,q0) */
4107 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4108 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4109 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4111 env
->fpus
|= 0x400; /* C2 <-- 1 */
4112 fptemp
= pow(2.0, expdif
- 50);
4113 fpsrcop
= (st0
/ st1
) / fptemp
;
4114 /* fpsrcop = integer obtained by chopping */
4115 fpsrcop
= (fpsrcop
< 0.0) ?
4116 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4117 st0
-= (st1
* fpsrcop
* fptemp
);
4119 ST0
= double_to_CPU86_LDouble(st0
);
4122 void helper_fprem(void)
4124 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4125 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4127 signed long long int q
;
4129 st0
= CPU86_LDouble_to_double(ST0
);
4130 st1
= CPU86_LDouble_to_double(ST1
);
4132 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4133 ST0
= double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */
4134 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4142 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4145 /* optimisation? taken from the AMD docs */
4146 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4147 /* ST0 is unchanged */
4151 if ( expdif
< 53 ) {
4152 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4153 /* round dblq towards zero */
4154 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4155 st0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4157 /* convert dblq to q by truncating towards zero */
4159 q
= (signed long long int)(-dblq
);
4161 q
= (signed long long int)dblq
;
4163 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4164 /* (C0,C3,C1) <-- (q2,q1,q0) */
4165 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4166 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4167 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4169 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4170 env
->fpus
|= 0x400; /* C2 <-- 1 */
4171 fptemp
= pow(2.0, (double)(expdif
- N
));
4172 fpsrcop
= (st0
/ st1
) / fptemp
;
4173 /* fpsrcop = integer obtained by chopping */
4174 fpsrcop
= (fpsrcop
< 0.0) ?
4175 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4176 st0
-= (st1
* fpsrcop
* fptemp
);
4178 ST0
= double_to_CPU86_LDouble(st0
);
4181 void helper_fyl2xp1(void)
4183 double fptemp
= CPU86_LDouble_to_double(ST0
);
4185 if ((fptemp
+1.0)>0.0) {
4186 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4187 fptemp
*= CPU86_LDouble_to_double(ST1
);
4188 ST1
= double_to_CPU86_LDouble(fptemp
);
4191 env
->fpus
&= (~0x4700);
4196 void helper_fsqrt(void)
4198 if (floatx_is_neg(ST0
)) {
4199 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4202 ST0
= floatx_sqrt(ST0
, &env
->fp_status
);
4205 void helper_fsincos(void)
4207 double fptemp
= CPU86_LDouble_to_double(ST0
);
4209 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4212 ST0
= double_to_CPU86_LDouble(sin(fptemp
));
4214 ST0
= double_to_CPU86_LDouble(cos(fptemp
));
4215 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4216 /* the above code is for |arg| < 2**63 only */
4220 void helper_frndint(void)
4222 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4225 void helper_fscale(void)
4227 if (floatx_is_any_nan(ST1
)) {
4230 int n
= floatx_to_int32_round_to_zero(ST1
, &env
->fp_status
);
4231 ST0
= floatx_scalbn(ST0
, n
, &env
->fp_status
);
4235 void helper_fsin(void)
4237 double fptemp
= CPU86_LDouble_to_double(ST0
);
4239 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4242 ST0
= double_to_CPU86_LDouble(sin(fptemp
));
4243 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4244 /* the above code is for |arg| < 2**53 only */
4248 void helper_fcos(void)
4250 double fptemp
= CPU86_LDouble_to_double(ST0
);
4252 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4255 ST0
= double_to_CPU86_LDouble(cos(fptemp
));
4256 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4257 /* the above code is for |arg5 < 2**63 only */
4261 void helper_fxam_ST0(void)
4263 CPU86_LDoubleU temp
;
4268 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4270 env
->fpus
|= 0x200; /* C1 <-- 1 */
4272 /* XXX: test fptags too */
4273 expdif
= EXPD(temp
);
4274 if (expdif
== MAXEXPD
) {
4275 #ifdef USE_X86LDOUBLE
4276 if (MANTD(temp
) == 0x8000000000000000ULL
)
4278 if (MANTD(temp
) == 0)
4280 env
->fpus
|= 0x500 /*Infinity*/;
4282 env
->fpus
|= 0x100 /*NaN*/;
4283 } else if (expdif
== 0) {
4284 if (MANTD(temp
) == 0)
4285 env
->fpus
|= 0x4000 /*Zero*/;
4287 env
->fpus
|= 0x4400 /*Denormal*/;
4293 void helper_fstenv(target_ulong ptr
, int data32
)
4295 int fpus
, fptag
, exp
, i
;
4299 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4301 for (i
=7; i
>=0; i
--) {
4303 if (env
->fptags
[i
]) {
4306 tmp
.d
= env
->fpregs
[i
].d
;
4309 if (exp
== 0 && mant
== 0) {
4312 } else if (exp
== 0 || exp
== MAXEXPD
4313 #ifdef USE_X86LDOUBLE
4314 || (mant
& (1LL << 63)) == 0
4317 /* NaNs, infinity, denormal */
4324 stl(ptr
, env
->fpuc
);
4326 stl(ptr
+ 8, fptag
);
4327 stl(ptr
+ 12, 0); /* fpip */
4328 stl(ptr
+ 16, 0); /* fpcs */
4329 stl(ptr
+ 20, 0); /* fpoo */
4330 stl(ptr
+ 24, 0); /* fpos */
4333 stw(ptr
, env
->fpuc
);
4335 stw(ptr
+ 4, fptag
);
4343 void helper_fldenv(target_ulong ptr
, int data32
)
4348 env
->fpuc
= lduw(ptr
);
4349 fpus
= lduw(ptr
+ 4);
4350 fptag
= lduw(ptr
+ 8);
4353 env
->fpuc
= lduw(ptr
);
4354 fpus
= lduw(ptr
+ 2);
4355 fptag
= lduw(ptr
+ 4);
4357 env
->fpstt
= (fpus
>> 11) & 7;
4358 env
->fpus
= fpus
& ~0x3800;
4359 for(i
= 0;i
< 8; i
++) {
4360 env
->fptags
[i
] = ((fptag
& 3) == 3);
4365 void helper_fsave(target_ulong ptr
, int data32
)
4370 helper_fstenv(ptr
, data32
);
4372 ptr
+= (14 << data32
);
4373 for(i
= 0;i
< 8; i
++) {
4375 helper_fstt(tmp
, ptr
);
4393 void helper_frstor(target_ulong ptr
, int data32
)
4398 helper_fldenv(ptr
, data32
);
4399 ptr
+= (14 << data32
);
4401 for(i
= 0;i
< 8; i
++) {
4402 tmp
= helper_fldt(ptr
);
4408 void helper_fxsave(target_ulong ptr
, int data64
)
4410 int fpus
, fptag
, i
, nb_xmm_regs
;
4414 /* The operand must be 16 byte aligned */
4416 raise_exception(EXCP0D_GPF
);
4419 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4421 for(i
= 0; i
< 8; i
++) {
4422 fptag
|= (env
->fptags
[i
] << i
);
4424 stw(ptr
, env
->fpuc
);
4426 stw(ptr
+ 4, fptag
^ 0xff);
4427 #ifdef TARGET_X86_64
4429 stq(ptr
+ 0x08, 0); /* rip */
4430 stq(ptr
+ 0x10, 0); /* rdp */
4434 stl(ptr
+ 0x08, 0); /* eip */
4435 stl(ptr
+ 0x0c, 0); /* sel */
4436 stl(ptr
+ 0x10, 0); /* dp */
4437 stl(ptr
+ 0x14, 0); /* sel */
4441 for(i
= 0;i
< 8; i
++) {
4443 helper_fstt(tmp
, addr
);
4447 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4448 /* XXX: finish it */
4449 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4450 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4451 if (env
->hflags
& HF_CS64_MASK
)
4456 /* Fast FXSAVE leaves out the XMM registers */
4457 if (!(env
->efer
& MSR_EFER_FFXSR
)
4458 || (env
->hflags
& HF_CPL_MASK
)
4459 || !(env
->hflags
& HF_LMA_MASK
)) {
4460 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4461 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4462 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4469 void helper_fxrstor(target_ulong ptr
, int data64
)
4471 int i
, fpus
, fptag
, nb_xmm_regs
;
4475 /* The operand must be 16 byte aligned */
4477 raise_exception(EXCP0D_GPF
);
4480 env
->fpuc
= lduw(ptr
);
4481 fpus
= lduw(ptr
+ 2);
4482 fptag
= lduw(ptr
+ 4);
4483 env
->fpstt
= (fpus
>> 11) & 7;
4484 env
->fpus
= fpus
& ~0x3800;
4486 for(i
= 0;i
< 8; i
++) {
4487 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4491 for(i
= 0;i
< 8; i
++) {
4492 tmp
= helper_fldt(addr
);
4497 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4498 /* XXX: finish it */
4499 env
->mxcsr
= ldl(ptr
+ 0x18);
4501 if (env
->hflags
& HF_CS64_MASK
)
4506 /* Fast FXRESTORE leaves out the XMM registers */
4507 if (!(env
->efer
& MSR_EFER_FFXSR
)
4508 || (env
->hflags
& HF_CPL_MASK
)
4509 || !(env
->hflags
& HF_LMA_MASK
)) {
4510 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4511 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4512 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
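/* Illustrative sketch (guarded out of the build): the 80-bit value handled by
 * cpu_get_fp80()/cpu_set_fp80() above is a 64-bit mantissa word with an
 * explicit integer bit plus a 16-bit word holding the sign and a 15-bit
 * exponent biased by 16383 (the bias visible in the conversions above).  The
 * struct below only names the fields for readers; it is not used by the code.
 */
#if 0
struct x86_fp80_sketch {
    uint64_t mant;     /* bit 63 = explicit integer bit, rest = fraction    */
    uint16_t sign_exp; /* bit 15 = sign, bits 14..0 = exponent + 16383 bias */
};
#endif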
4575 #ifdef TARGET_X86_64
4577 //#define DEBUG_MULDIV
4579 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4588 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4592 add128(plow
, phigh
, 1, 0);
4595 /* return TRUE if overflow */
4596 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4598 uint64_t q
, r
, a1
, a0
;
4611 /* XXX: use a better algorithm */
4612 for(i
= 0; i
< 64; i
++) {
4614 a1
= (a1
<< 1) | (a0
>> 63);
4615 if (ab
|| a1
>= b
) {
4621 a0
= (a0
<< 1) | qb
;
4623 #if defined(DEBUG_MULDIV)
4624 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4625 *phigh
, *plow
, b
, a0
, a1
);
4633 /* return TRUE if overflow */
4634 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4637 sa
= ((int64_t)*phigh
< 0);
4639 neg128(plow
, phigh
);
4643 if (div64(plow
, phigh
, b
) != 0)
4646 if (*plow
> (1ULL << 63))
4650 if (*plow
>= (1ULL << 63))
4658 void helper_mulq_EAX_T0(target_ulong t0
)
4662 mulu64(&r0
, &r1
, EAX
, t0
);
4669 void helper_imulq_EAX_T0(target_ulong t0
)
4673 muls64(&r0
, &r1
, EAX
, t0
);
4677 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4680 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4684 muls64(&r0
, &r1
, t0
, t1
);
4686 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4690 void helper_divq_EAX(target_ulong t0
)
4694 raise_exception(EXCP00_DIVZ
);
4698 if (div64(&r0
, &r1
, t0
))
4699 raise_exception(EXCP00_DIVZ
);
4704 void helper_idivq_EAX(target_ulong t0
)
4708 raise_exception(EXCP00_DIVZ
);
4712 if (idiv64(&r0
, &r1
, t0
))
4713 raise_exception(EXCP00_DIVZ
);
4719 static void do_hlt(void)
4721 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4723 env
->exception_index
= EXCP_HLT
;
4727 void helper_hlt(int next_eip_addend
)
4729 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4730 EIP
+= next_eip_addend
;
4735 void helper_monitor(target_ulong ptr
)
4737 if ((uint32_t)ECX
!= 0)
4738 raise_exception(EXCP0D_GPF
);
4739 /* XXX: store address ? */
4740 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4743 void helper_mwait(int next_eip_addend
)
4745 if ((uint32_t)ECX
!= 0)
4746 raise_exception(EXCP0D_GPF
);
4747 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4748 EIP
+= next_eip_addend
;
4750 /* XXX: not complete but not completely erroneous */
4751 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4752 /* more than one CPU: do not sleep because another CPU may
4759 void helper_debug(void)
4761 env
->exception_index
= EXCP_DEBUG
;
4765 void helper_reset_rf(void)
4767 env
->eflags
&= ~RF_MASK
;
4770 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4772 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4775 void helper_raise_exception(int exception_index
)
4777 raise_exception(exception_index
);
4780 void helper_cli(void)
4782 env
->eflags
&= ~IF_MASK
;
4785 void helper_sti(void)
4787 env
->eflags
|= IF_MASK
;
4791 /* vm86plus instructions */
4792 void helper_cli_vm(void)
4794 env
->eflags
&= ~VIF_MASK
;
4797 void helper_sti_vm(void)
4799 env
->eflags
|= VIF_MASK
;
4800 if (env
->eflags
& VIP_MASK
) {
4801 raise_exception(EXCP0D_GPF
);
4806 void helper_set_inhibit_irq(void)
4808 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4811 void helper_reset_inhibit_irq(void)
4813 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4816 void helper_boundw(target_ulong a0
, int v
)
4820 high
= ldsw(a0
+ 2);
4822 if (v
< low
|| v
> high
) {
4823 raise_exception(EXCP05_BOUND
);
4827 void helper_boundl(target_ulong a0
, int v
)
4832 if (v
< low
|| v
> high
) {
4833 raise_exception(EXCP05_BOUND
);
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif
4890 /* Secure Virtual Machine helpers */
4892 #if defined(CONFIG_USER_ONLY)
4894 void helper_vmrun(int aflag
, int next_eip_addend
)
4897 void helper_vmmcall(void)
4900 void helper_vmload(int aflag
)
4903 void helper_vmsave(int aflag
)
4906 void helper_stgi(void)
4909 void helper_clgi(void)
4912 void helper_skinit(void)
4915 void helper_invlpga(int aflag
)
4918 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4921 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4925 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4926 uint32_t next_eip_addend
)
4931 static inline void svm_save_seg(target_phys_addr_t addr
,
4932 const SegmentCache
*sc
)
4934 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4936 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4938 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4940 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4941 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
4944 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4948 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4949 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4950 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4951 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4952 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4955 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4956 CPUState
*env
, int seg_reg
)
4958 SegmentCache sc1
, *sc
= &sc1
;
4959 svm_load_seg(addr
, sc
);
4960 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4961 sc
->base
, sc
->limit
, sc
->flags
);
4964 void helper_vmrun(int aflag
, int next_eip_addend
)
4970 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4975 addr
= (uint32_t)EAX
;
4977 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4979 env
->vm_vmcb
= addr
;
4981 /* save the current CPU state in the hsave page */
4982 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4983 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4985 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4986 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4988 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4989 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4990 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4991 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4992 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4993 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4995 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4996 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4998 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5000 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5002 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5004 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5007 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
5008 EIP
+ next_eip_addend
);
5009 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5010 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5012 /* load the interception bitmaps so we do not need to access the
5014 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
5015 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
5016 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
5017 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
    .intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}
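/* VMMCALL is not given any behaviour of its own here: after the
   intercept check it raises #UD, matching what a guest would see on
   hardware when no hypervisor handles the call. */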
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
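/* VMLOAD loads the "hidden" guest state that VMRUN does not touch
   (FS, GS, TR, LDTR plus the syscall/sysenter MSRs) from the VMCB
   whose physical address is taken from rAX; aflag selects whether the
   full 64-bit rAX or only its low 32 bits form that address. */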
void helper_vmload(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
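/* VMSAVE is the mirror of VMLOAD: it writes the same set of hidden
   segment registers and MSRs back into the VMCB addressed by rAX. */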
void helper_vmsave(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
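/* STGI and CLGI set and clear the global interrupt flag (GIF), kept
   here in hflags2 as HF2_GIF_MASK; while GIF is clear the virtual CPU
   holds off interrupt delivery. */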
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
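/* INVLPGA invalidates the TLB entry for the address in rAX.  The ASID
   operand (ECX on real hardware) is not used by this emulation, which
   simply flushes the one page in the current context. */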
void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
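/* Central SVM intercept check, called from the translated code and
   from other helpers.  CR/DR accesses and exceptions are tested
   against the bitmaps loaded at VMRUN; MSR accesses are looked up in
   the MSR permission map, which holds two bits per MSR (read, then
   write, hence the scaling by 2 and the shift by 'param'); all other
   exit codes use the generic 64-bit intercept word.  Any hit leads to
   helper_vmexit(). */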
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;

    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
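/* IN/OUT intercept check: 'param' is the SVM IOIO exit information
   (the access size sits in bits 4..6), and the port is looked up in
   the I/O permission bitmap referenced by the VMCB.  On a hit, the
   address of the next instruction is recorded in exit_info_2 before
   the #VMEXIT is raised. */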
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
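/* #VMEXIT: write the guest state back into the VMCB (segments,
   descriptor tables, control and debug registers, rflags/rip/rsp/rax
   and the current CPL), record the exit code and exit information,
   then reload the host state that VMRUN stashed in vm_hsave and leave
   the guest via cpu_loop_exit(). */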
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
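/* MMX/SSE helpers.  The MMX registers alias the x87 stack, so entering
   MMX mode resets the stack top and marks all fptags entries as valid
   (0), while EMMS marks them all as empty (1) again. */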
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
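/* The vector and generic ALU helpers below are template generated:
   ops_sse.h is included twice (SHIFT 0 for the MMX forms, SHIFT 1 for
   the SSE forms) and helper_template.h once per operand size (SHIFT
   0..2, plus SHIFT 3 on 64-bit targets). */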
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
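/* helper_bsf counts trailing zero bits; helper_lzcnt counts leading
   zeros for the given operand width, and with wordsize 0 it instead
   returns the index of the highest set bit, which is exactly what
   helper_bsr needs. */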
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
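/* Lazy condition code evaluation: CC_OP records which operation last
   set the flags, and helper_cc_compute_all()/helper_cc_compute_c()
   reconstruct the full flag set (or just CF) from CC_SRC/CC_DST only
   when a consumer actually needs them. */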
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();