4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "host-utils.h"
/* Protected-mode call/interrupt tracing: active only when DEBUG_PCALL
   is defined at build time; otherwise the macros compile to nothing. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
         log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* Debug wrapper around the real raise_exception_err(): logs the source
   line before raising.  The parenthesized callee name suppresses
   recursive expansion of this macro. */
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
46 static const uint8_t parity_table
[256] = {
47 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
48 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
51 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
52 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
58 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
59 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
60 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
63 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
64 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
67 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
68 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
69 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
70 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
71 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
72 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
73 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
74 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
75 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
76 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
77 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
78 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
82 static const uint8_t rclw_table
[32] = {
83 0, 1, 2, 3, 4, 5, 6, 7,
84 8, 9,10,11,12,13,14,15,
85 16, 0, 1, 2, 3, 4, 5, 6,
86 7, 8, 9,10,11,12,13,14,
90 static const uint8_t rclb_table
[32] = {
91 0, 1, 2, 3, 4, 5, 6, 7,
92 8, 0, 1, 2, 3, 4, 5, 6,
93 7, 8, 0, 1, 2, 3, 4, 5,
94 6, 7, 8, 0, 1, 2, 3, 4,
97 static const CPU86_LDouble f15rk
[7] =
99 0.00000000000000000000L,
100 1.00000000000000000000L,
101 3.14159265358979323851L, /*pi*/
102 0.30102999566398119523L, /*lg2*/
103 0.69314718055994530943L, /*ln2*/
104 1.44269504088896340739L, /*l2e*/
105 3.32192809488736234781L, /*l2t*/
108 /* broken thread support */
110 static spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
112 void helper_lock(void)
114 spin_lock(&global_cpu_lock
);
117 void helper_unlock(void)
119 spin_unlock(&global_cpu_lock
);
122 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
124 load_eflags(t0
, update_mask
);
127 target_ulong
helper_read_eflags(void)
130 eflags
= helper_cc_compute_all(CC_OP
);
131 eflags
|= (DF
& DF_MASK
);
132 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
136 /* return non zero if error */
137 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
148 index
= selector
& ~7;
149 if ((index
+ 7) > dt
->limit
)
151 ptr
= dt
->base
+ index
;
152 *e1_ptr
= ldl_kernel(ptr
);
153 *e2_ptr
= ldl_kernel(ptr
+ 4);
157 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
160 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
161 if (e2
& DESC_G_MASK
)
162 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor
   words e1 (bits 31..16) and e2 (bits 7..0 and 31..24). */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
171 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
173 sc
->base
= get_seg_base(e1
, e2
);
174 sc
->limit
= get_seg_limit(e1
, e2
);
178 /* init the segment cache in vm86 mode. */
179 static inline void load_seg_vm(int seg
, int selector
)
182 cpu_x86_load_seg_cache(env
, seg
, selector
,
183 (selector
<< 4), 0xffff, 0);
186 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
187 uint32_t *esp_ptr
, int dpl
)
189 int type
, index
, shift
;
194 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
195 for(i
=0;i
<env
->tr
.limit
;i
++) {
196 printf("%02x ", env
->tr
.base
[i
]);
197 if ((i
& 7) == 7) printf("\n");
203 if (!(env
->tr
.flags
& DESC_P_MASK
))
204 cpu_abort(env
, "invalid tss");
205 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
207 cpu_abort(env
, "invalid tss type");
209 index
= (dpl
* 4 + 2) << shift
;
210 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
211 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
213 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
214 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
216 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
217 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
221 /* XXX: merge with load_seg() */
222 static void tss_load_seg(int seg_reg
, int selector
)
227 if ((selector
& 0xfffc) != 0) {
228 if (load_segment(&e1
, &e2
, selector
) != 0)
229 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
230 if (!(e2
& DESC_S_MASK
))
231 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
233 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
234 cpl
= env
->hflags
& HF_CPL_MASK
;
235 if (seg_reg
== R_CS
) {
236 if (!(e2
& DESC_CS_MASK
))
237 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
238 /* XXX: is it correct ? */
240 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
241 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
242 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
243 } else if (seg_reg
== R_SS
) {
244 /* SS must be writable data */
245 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
246 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
247 if (dpl
!= cpl
|| dpl
!= rpl
)
248 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
250 /* not readable code */
251 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
252 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
253 /* if data or non conforming code, checks the rights */
254 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
255 if (dpl
< cpl
|| dpl
< rpl
)
256 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
259 if (!(e2
& DESC_P_MASK
))
260 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
261 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
262 get_seg_base(e1
, e2
),
263 get_seg_limit(e1
, e2
),
266 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
267 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
271 #define SWITCH_TSS_JMP 0
272 #define SWITCH_TSS_IRET 1
273 #define SWITCH_TSS_CALL 2
275 /* XXX: restore CPU state in registers (PowerPC case) */
276 static void switch_tss(int tss_selector
,
277 uint32_t e1
, uint32_t e2
, int source
,
280 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
281 target_ulong tss_base
;
282 uint32_t new_regs
[8], new_segs
[6];
283 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
284 uint32_t old_eflags
, eflags_mask
;
289 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
290 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
292 /* if task gate, we read the TSS segment and we load it */
294 if (!(e2
& DESC_P_MASK
))
295 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
296 tss_selector
= e1
>> 16;
297 if (tss_selector
& 4)
298 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
299 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
300 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
301 if (e2
& DESC_S_MASK
)
302 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
303 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
305 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
308 if (!(e2
& DESC_P_MASK
))
309 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
315 tss_limit
= get_seg_limit(e1
, e2
);
316 tss_base
= get_seg_base(e1
, e2
);
317 if ((tss_selector
& 4) != 0 ||
318 tss_limit
< tss_limit_max
)
319 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
320 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
322 old_tss_limit_max
= 103;
324 old_tss_limit_max
= 43;
326 /* read all the registers from the new TSS */
329 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
330 new_eip
= ldl_kernel(tss_base
+ 0x20);
331 new_eflags
= ldl_kernel(tss_base
+ 0x24);
332 for(i
= 0; i
< 8; i
++)
333 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
334 for(i
= 0; i
< 6; i
++)
335 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
336 new_ldt
= lduw_kernel(tss_base
+ 0x60);
337 new_trap
= ldl_kernel(tss_base
+ 0x64);
341 new_eip
= lduw_kernel(tss_base
+ 0x0e);
342 new_eflags
= lduw_kernel(tss_base
+ 0x10);
343 for(i
= 0; i
< 8; i
++)
344 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
345 for(i
= 0; i
< 4; i
++)
346 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
347 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
352 /* XXX: avoid a compiler warning, see
353 http://support.amd.com/us/Processor_TechDocs/24593.pdf
354 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
357 /* NOTE: we must avoid memory exceptions during the task switch,
358 so we make dummy accesses before */
359 /* XXX: it can still fail in some cases, so a bigger hack is
360 necessary to valid the TLB after having done the accesses */
362 v1
= ldub_kernel(env
->tr
.base
);
363 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
364 stb_kernel(env
->tr
.base
, v1
);
365 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
367 /* clear busy bit (it is restartable) */
368 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
371 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
372 e2
= ldl_kernel(ptr
+ 4);
373 e2
&= ~DESC_TSS_BUSY_MASK
;
374 stl_kernel(ptr
+ 4, e2
);
376 old_eflags
= compute_eflags();
377 if (source
== SWITCH_TSS_IRET
)
378 old_eflags
&= ~NT_MASK
;
380 /* save the current state in the old TSS */
383 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
384 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
385 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
386 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
387 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
388 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
389 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
390 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
391 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
392 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
393 for(i
= 0; i
< 6; i
++)
394 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
397 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
398 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
399 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
400 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
401 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
402 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
403 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
404 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
405 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
406 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
407 for(i
= 0; i
< 4; i
++)
408 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
411 /* now if an exception occurs, it will occurs in the next task
414 if (source
== SWITCH_TSS_CALL
) {
415 stw_kernel(tss_base
, env
->tr
.selector
);
416 new_eflags
|= NT_MASK
;
420 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
423 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
424 e2
= ldl_kernel(ptr
+ 4);
425 e2
|= DESC_TSS_BUSY_MASK
;
426 stl_kernel(ptr
+ 4, e2
);
429 /* set the new CPU state */
430 /* from this point, any exception which occurs can give problems */
431 env
->cr
[0] |= CR0_TS_MASK
;
432 env
->hflags
|= HF_TS_MASK
;
433 env
->tr
.selector
= tss_selector
;
434 env
->tr
.base
= tss_base
;
435 env
->tr
.limit
= tss_limit
;
436 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
438 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
439 cpu_x86_update_cr3(env
, new_cr3
);
442 /* load all registers without an exception, then reload them with
443 possible exception */
445 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
446 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
448 eflags_mask
&= 0xffff;
449 load_eflags(new_eflags
, eflags_mask
);
450 /* XXX: what to do in 16 bit case ? */
459 if (new_eflags
& VM_MASK
) {
460 for(i
= 0; i
< 6; i
++)
461 load_seg_vm(i
, new_segs
[i
]);
462 /* in vm86, CPL is always 3 */
463 cpu_x86_set_cpl(env
, 3);
465 /* CPL is set the RPL of CS */
466 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
467 /* first just selectors as the rest may trigger exceptions */
468 for(i
= 0; i
< 6; i
++)
469 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
472 env
->ldt
.selector
= new_ldt
& ~4;
479 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
481 if ((new_ldt
& 0xfffc) != 0) {
483 index
= new_ldt
& ~7;
484 if ((index
+ 7) > dt
->limit
)
485 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
486 ptr
= dt
->base
+ index
;
487 e1
= ldl_kernel(ptr
);
488 e2
= ldl_kernel(ptr
+ 4);
489 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
490 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
491 if (!(e2
& DESC_P_MASK
))
492 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
493 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
496 /* load the segments */
497 if (!(new_eflags
& VM_MASK
)) {
498 tss_load_seg(R_CS
, new_segs
[R_CS
]);
499 tss_load_seg(R_SS
, new_segs
[R_SS
]);
500 tss_load_seg(R_ES
, new_segs
[R_ES
]);
501 tss_load_seg(R_DS
, new_segs
[R_DS
]);
502 tss_load_seg(R_FS
, new_segs
[R_FS
]);
503 tss_load_seg(R_GS
, new_segs
[R_GS
]);
506 /* check that EIP is in the CS segment limits */
507 if (new_eip
> env
->segs
[R_CS
].limit
) {
508 /* XXX: different exception if CALL ? */
509 raise_exception_err(EXCP0D_GPF
, 0);
512 #ifndef CONFIG_USER_ONLY
513 /* reset local breakpoints */
514 if (env
->dr
[7] & 0x55) {
515 for (i
= 0; i
< 4; i
++) {
516 if (hw_breakpoint_enabled(env
->dr
[7], i
) == 0x1)
517 hw_breakpoint_remove(env
, i
);
524 /* check if Port I/O is allowed in TSS */
525 static inline void check_io(int addr
, int size
)
527 int io_offset
, val
, mask
;
529 /* TSS must be a valid 32 bit one */
530 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
531 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
534 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
535 io_offset
+= (addr
>> 3);
536 /* Note: the check needs two bytes */
537 if ((io_offset
+ 1) > env
->tr
.limit
)
539 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
541 mask
= (1 << size
) - 1;
542 /* all bits must be zero to allow the I/O */
543 if ((val
& mask
) != 0) {
545 raise_exception_err(EXCP0D_GPF
, 0);
549 void helper_check_iob(uint32_t t0
)
554 void helper_check_iow(uint32_t t0
)
559 void helper_check_iol(uint32_t t0
)
564 void helper_outb(uint32_t port
, uint32_t data
)
566 cpu_outb(port
, data
& 0xff);
569 target_ulong
helper_inb(uint32_t port
)
571 return cpu_inb(port
);
574 void helper_outw(uint32_t port
, uint32_t data
)
576 cpu_outw(port
, data
& 0xffff);
579 target_ulong
helper_inw(uint32_t port
)
581 return cpu_inw(port
);
584 void helper_outl(uint32_t port
, uint32_t data
)
586 cpu_outl(port
, data
);
589 target_ulong
helper_inl(uint32_t port
)
591 return cpu_inl(port
);
594 static inline unsigned int get_sp_mask(unsigned int e2
)
596 if (e2
& DESC_B_MASK
)
/* Return 1 if exception vector 'intno' pushes an error code on the
   stack (#DF, #TS, #NP, #SS, #GP, #PF, #AC), otherwise 0. */
static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
/* Update ESP/RSP under the stack-size mask.  The 64-bit build must
   special-case the masks so that a 32-bit stack write zero-extends
   into RSP instead of preserving the high 32 bits. */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
631 /* in 64-bit machines, this can overflow. So this segment addition macro
632 * can be used to trim the value to 32-bit whenever needed */
633 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop primitives: 'sp' is adjusted in place, the store or
   load goes through the segment base 'ssp' under 'sp_mask'. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
660 /* protected mode interrupt */
661 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
662 unsigned int next_eip
, int is_hw
)
665 target_ulong ptr
, ssp
;
666 int type
, dpl
, selector
, ss_dpl
, cpl
;
667 int has_error_code
, new_stack
, shift
;
668 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
669 uint32_t old_eip
, sp_mask
;
672 if (!is_int
&& !is_hw
)
673 has_error_code
= exeption_has_error_code(intno
);
680 if (intno
* 8 + 7 > dt
->limit
)
681 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
682 ptr
= dt
->base
+ intno
* 8;
683 e1
= ldl_kernel(ptr
);
684 e2
= ldl_kernel(ptr
+ 4);
685 /* check gate type */
686 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
688 case 5: /* task gate */
689 /* must do that check here to return the correct error code */
690 if (!(e2
& DESC_P_MASK
))
691 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
692 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
693 if (has_error_code
) {
696 /* push the error code */
697 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
699 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
703 esp
= (ESP
- (2 << shift
)) & mask
;
704 ssp
= env
->segs
[R_SS
].base
+ esp
;
706 stl_kernel(ssp
, error_code
);
708 stw_kernel(ssp
, error_code
);
712 case 6: /* 286 interrupt gate */
713 case 7: /* 286 trap gate */
714 case 14: /* 386 interrupt gate */
715 case 15: /* 386 trap gate */
718 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
721 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
722 cpl
= env
->hflags
& HF_CPL_MASK
;
723 /* check privilege if software int */
724 if (is_int
&& dpl
< cpl
)
725 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
726 /* check valid bit */
727 if (!(e2
& DESC_P_MASK
))
728 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
730 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
731 if ((selector
& 0xfffc) == 0)
732 raise_exception_err(EXCP0D_GPF
, 0);
734 if (load_segment(&e1
, &e2
, selector
) != 0)
735 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
736 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
737 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
738 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
740 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
741 if (!(e2
& DESC_P_MASK
))
742 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
743 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
744 /* to inner privilege */
745 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
746 if ((ss
& 0xfffc) == 0)
747 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
749 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
750 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
751 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
752 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
754 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
755 if (!(ss_e2
& DESC_S_MASK
) ||
756 (ss_e2
& DESC_CS_MASK
) ||
757 !(ss_e2
& DESC_W_MASK
))
758 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
759 if (!(ss_e2
& DESC_P_MASK
))
760 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
762 sp_mask
= get_sp_mask(ss_e2
);
763 ssp
= get_seg_base(ss_e1
, ss_e2
);
764 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
765 /* to same privilege */
766 if (env
->eflags
& VM_MASK
)
767 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
769 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
770 ssp
= env
->segs
[R_SS
].base
;
774 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
775 new_stack
= 0; /* avoid warning */
776 sp_mask
= 0; /* avoid warning */
777 ssp
= 0; /* avoid warning */
778 esp
= 0; /* avoid warning */
784 /* XXX: check that enough room is available */
785 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
786 if (env
->eflags
& VM_MASK
)
792 if (env
->eflags
& VM_MASK
) {
793 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
794 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
795 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
796 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
798 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
799 PUSHL(ssp
, esp
, sp_mask
, ESP
);
801 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
802 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
803 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
804 if (has_error_code
) {
805 PUSHL(ssp
, esp
, sp_mask
, error_code
);
809 if (env
->eflags
& VM_MASK
) {
810 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
811 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
812 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
813 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
815 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
816 PUSHW(ssp
, esp
, sp_mask
, ESP
);
818 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
819 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
820 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
821 if (has_error_code
) {
822 PUSHW(ssp
, esp
, sp_mask
, error_code
);
827 if (env
->eflags
& VM_MASK
) {
828 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
829 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
830 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
831 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
833 ss
= (ss
& ~3) | dpl
;
834 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
835 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
837 SET_ESP(esp
, sp_mask
);
839 selector
= (selector
& ~3) | dpl
;
840 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
841 get_seg_base(e1
, e2
),
842 get_seg_limit(e1
, e2
),
844 cpu_x86_set_cpl(env
, dpl
);
847 /* interrupt gate clear IF mask */
848 if ((type
& 1) == 0) {
849 env
->eflags
&= ~IF_MASK
;
851 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
/* 64-bit stack push/pop primitives: flat address space, no segment
   base or mask needed; 'sp' is adjusted in place. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
868 static inline target_ulong
get_rsp_from_tss(int level
)
873 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
874 env
->tr
.base
, env
->tr
.limit
);
877 if (!(env
->tr
.flags
& DESC_P_MASK
))
878 cpu_abort(env
, "invalid tss");
879 index
= 8 * level
+ 4;
880 if ((index
+ 7) > env
->tr
.limit
)
881 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
882 return ldq_kernel(env
->tr
.base
+ index
);
885 /* 64 bit interrupt */
886 static void do_interrupt64(int intno
, int is_int
, int error_code
,
887 target_ulong next_eip
, int is_hw
)
891 int type
, dpl
, selector
, cpl
, ist
;
892 int has_error_code
, new_stack
;
893 uint32_t e1
, e2
, e3
, ss
;
894 target_ulong old_eip
, esp
, offset
;
897 if (!is_int
&& !is_hw
)
898 has_error_code
= exeption_has_error_code(intno
);
905 if (intno
* 16 + 15 > dt
->limit
)
906 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
907 ptr
= dt
->base
+ intno
* 16;
908 e1
= ldl_kernel(ptr
);
909 e2
= ldl_kernel(ptr
+ 4);
910 e3
= ldl_kernel(ptr
+ 8);
911 /* check gate type */
912 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
914 case 14: /* 386 interrupt gate */
915 case 15: /* 386 trap gate */
918 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
921 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
922 cpl
= env
->hflags
& HF_CPL_MASK
;
923 /* check privilege if software int */
924 if (is_int
&& dpl
< cpl
)
925 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
926 /* check valid bit */
927 if (!(e2
& DESC_P_MASK
))
928 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
930 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
932 if ((selector
& 0xfffc) == 0)
933 raise_exception_err(EXCP0D_GPF
, 0);
935 if (load_segment(&e1
, &e2
, selector
) != 0)
936 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
937 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
938 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
939 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
941 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
942 if (!(e2
& DESC_P_MASK
))
943 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
944 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
945 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
946 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
947 /* to inner privilege */
949 esp
= get_rsp_from_tss(ist
+ 3);
951 esp
= get_rsp_from_tss(dpl
);
952 esp
&= ~0xfLL
; /* align stack */
955 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
956 /* to same privilege */
957 if (env
->eflags
& VM_MASK
)
958 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
961 esp
= get_rsp_from_tss(ist
+ 3);
964 esp
&= ~0xfLL
; /* align stack */
967 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
968 new_stack
= 0; /* avoid warning */
969 esp
= 0; /* avoid warning */
972 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
974 PUSHQ(esp
, compute_eflags());
975 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
977 if (has_error_code
) {
978 PUSHQ(esp
, error_code
);
983 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
987 selector
= (selector
& ~3) | dpl
;
988 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
989 get_seg_base(e1
, e2
),
990 get_seg_limit(e1
, e2
),
992 cpu_x86_set_cpl(env
, dpl
);
995 /* interrupt gate clear IF mask */
996 if ((type
& 1) == 0) {
997 env
->eflags
&= ~IF_MASK
;
999 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1003 #ifdef TARGET_X86_64
1004 #if defined(CONFIG_USER_ONLY)
1005 void helper_syscall(int next_eip_addend
)
1007 env
->exception_index
= EXCP_SYSCALL
;
1008 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1012 void helper_syscall(int next_eip_addend
)
1016 if (!(env
->efer
& MSR_EFER_SCE
)) {
1017 raise_exception_err(EXCP06_ILLOP
, 0);
1019 selector
= (env
->star
>> 32) & 0xffff;
1020 if (env
->hflags
& HF_LMA_MASK
) {
1023 ECX
= env
->eip
+ next_eip_addend
;
1024 env
->regs
[11] = compute_eflags();
1026 code64
= env
->hflags
& HF_CS64_MASK
;
1028 cpu_x86_set_cpl(env
, 0);
1029 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1031 DESC_G_MASK
| DESC_P_MASK
|
1033 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1034 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1036 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1038 DESC_W_MASK
| DESC_A_MASK
);
1039 env
->eflags
&= ~env
->fmask
;
1040 load_eflags(env
->eflags
, 0);
1042 env
->eip
= env
->lstar
;
1044 env
->eip
= env
->cstar
;
1046 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1048 cpu_x86_set_cpl(env
, 0);
1049 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1051 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1053 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1054 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1056 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1058 DESC_W_MASK
| DESC_A_MASK
);
1059 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1060 env
->eip
= (uint32_t)env
->star
;
1066 #ifdef TARGET_X86_64
1067 void helper_sysret(int dflag
)
1071 if (!(env
->efer
& MSR_EFER_SCE
)) {
1072 raise_exception_err(EXCP06_ILLOP
, 0);
1074 cpl
= env
->hflags
& HF_CPL_MASK
;
1075 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1076 raise_exception_err(EXCP0D_GPF
, 0);
1078 selector
= (env
->star
>> 48) & 0xffff;
1079 if (env
->hflags
& HF_LMA_MASK
) {
1081 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1083 DESC_G_MASK
| DESC_P_MASK
|
1084 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1085 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1089 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1091 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1092 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1093 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1094 env
->eip
= (uint32_t)ECX
;
1096 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1098 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1099 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1100 DESC_W_MASK
| DESC_A_MASK
);
1101 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1102 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1103 cpu_x86_set_cpl(env
, 3);
1105 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1107 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1108 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1109 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1110 env
->eip
= (uint32_t)ECX
;
1111 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1113 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1114 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1115 DESC_W_MASK
| DESC_A_MASK
);
1116 env
->eflags
|= IF_MASK
;
1117 cpu_x86_set_cpl(env
, 3);
1122 /* real mode interrupt */
1123 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1124 unsigned int next_eip
)
1127 target_ulong ptr
, ssp
;
1129 uint32_t offset
, esp
;
1130 uint32_t old_cs
, old_eip
;
1132 /* real mode (simpler !) */
1134 if (intno
* 4 + 3 > dt
->limit
)
1135 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1136 ptr
= dt
->base
+ intno
* 4;
1137 offset
= lduw_kernel(ptr
);
1138 selector
= lduw_kernel(ptr
+ 2);
1140 ssp
= env
->segs
[R_SS
].base
;
1145 old_cs
= env
->segs
[R_CS
].selector
;
1146 /* XXX: use SS segment size ? */
1147 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1148 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1149 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1151 /* update processor state */
1152 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1154 env
->segs
[R_CS
].selector
= selector
;
1155 env
->segs
[R_CS
].base
= (selector
<< 4);
1156 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1159 /* fake user mode interrupt */
1160 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1161 target_ulong next_eip
)
1165 int dpl
, cpl
, shift
;
1169 if (env
->hflags
& HF_LMA_MASK
) {
1174 ptr
= dt
->base
+ (intno
<< shift
);
1175 e2
= ldl_kernel(ptr
+ 4);
1177 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1178 cpl
= env
->hflags
& HF_CPL_MASK
;
1179 /* check privilege if software int */
1180 if (is_int
&& dpl
< cpl
)
1181 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1183 /* Since we emulate only user space, we cannot do more than
1184 exiting the emulation with the suitable exception and error
1190 #if !defined(CONFIG_USER_ONLY)
1191 static void handle_even_inj(int intno
, int is_int
, int error_code
,
1194 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1195 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1198 type
= SVM_EVTINJ_TYPE_SOFT
;
1200 type
= SVM_EVTINJ_TYPE_EXEPT
;
1201 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1202 if (!rm
&& exeption_has_error_code(intno
)) {
1203 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1204 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
), error_code
);
1206 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
);
1212 * Begin execution of an interruption. is_int is TRUE if coming from
1213 * the int instruction. next_eip is the EIP value AFTER the interrupt
1214 * instruction. It is only relevant if is_int is TRUE.
1216 void do_interrupt(int intno
, int is_int
, int error_code
,
1217 target_ulong next_eip
, int is_hw
)
1219 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1220 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1222 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1223 count
, intno
, error_code
, is_int
,
1224 env
->hflags
& HF_CPL_MASK
,
1225 env
->segs
[R_CS
].selector
, EIP
,
1226 (int)env
->segs
[R_CS
].base
+ EIP
,
1227 env
->segs
[R_SS
].selector
, ESP
);
1228 if (intno
== 0x0e) {
1229 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1231 qemu_log(" EAX=" TARGET_FMT_lx
, EAX
);
1234 log_cpu_state(env
, X86_DUMP_CCOP
);
1240 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1241 for(i
= 0; i
< 16; i
++) {
1242 qemu_log(" %02x", ldub(ptr
+ i
));
1250 if (env
->cr
[0] & CR0_PE_MASK
) {
1251 #if !defined(CONFIG_USER_ONLY)
1252 if (env
->hflags
& HF_SVMI_MASK
)
1253 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 0);
1255 #ifdef TARGET_X86_64
1256 if (env
->hflags
& HF_LMA_MASK
) {
1257 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1261 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1264 #if !defined(CONFIG_USER_ONLY)
1265 if (env
->hflags
& HF_SVMI_MASK
)
1266 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 1);
1268 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1271 #if !defined(CONFIG_USER_ONLY)
1272 if (env
->hflags
& HF_SVMI_MASK
) {
1273 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1274 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
1279 /* This should come from sysemu.h - if we could include it here... */
1280 void qemu_system_reset_request(void);
1283 * Check nested exceptions and change to double or triple fault if
1284 * needed. It should only be called, if this is not an interrupt.
1285 * Returns the new exception number.
1287 static int check_exception(int intno
, int *error_code
)
1289 int first_contributory
= env
->old_exception
== 0 ||
1290 (env
->old_exception
>= 10 &&
1291 env
->old_exception
<= 13);
1292 int second_contributory
= intno
== 0 ||
1293 (intno
>= 10 && intno
<= 13);
1295 qemu_log_mask(CPU_LOG_INT
, "check_exception old: 0x%x new 0x%x\n",
1296 env
->old_exception
, intno
);
1298 #if !defined(CONFIG_USER_ONLY)
1299 if (env
->old_exception
== EXCP08_DBLE
) {
1300 if (env
->hflags
& HF_SVMI_MASK
)
1301 helper_vmexit(SVM_EXIT_SHUTDOWN
, 0); /* does not return */
1303 qemu_log_mask(CPU_LOG_RESET
, "Triple fault\n");
1305 qemu_system_reset_request();
1310 if ((first_contributory
&& second_contributory
)
1311 || (env
->old_exception
== EXCP0E_PAGE
&&
1312 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1313 intno
= EXCP08_DBLE
;
1317 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1318 (intno
== EXCP08_DBLE
))
1319 env
->old_exception
= intno
;
1325 * Signal an interruption. It is executed in the main CPU loop.
1326 * is_int is TRUE if coming from the int instruction. next_eip is the
1327 * EIP value AFTER the interrupt instruction. It is only relevant if
1330 static void QEMU_NORETURN
raise_interrupt(int intno
, int is_int
, int error_code
,
1331 int next_eip_addend
)
1334 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1335 intno
= check_exception(intno
, &error_code
);
1337 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
1340 env
->exception_index
= intno
;
1341 env
->error_code
= error_code
;
1342 env
->exception_is_int
= is_int
;
1343 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1347 /* shortcuts to generate exceptions */
1349 void raise_exception_err(int exception_index
, int error_code
)
1351 raise_interrupt(exception_index
, 0, error_code
, 0);
1354 void raise_exception(int exception_index
)
1356 raise_interrupt(exception_index
, 0, 0, 0);
1359 void raise_exception_env(int exception_index
, CPUState
*nenv
)
1362 raise_exception(exception_index
);
1366 #if defined(CONFIG_USER_ONLY)
1368 void do_smm_enter(void)
1372 void helper_rsm(void)
1378 #ifdef TARGET_X86_64
1379 #define SMM_REVISION_ID 0x00020064
1381 #define SMM_REVISION_ID 0x00020000
1384 void do_smm_enter(void)
1386 target_ulong sm_state
;
1390 qemu_log_mask(CPU_LOG_INT
, "SMM: enter\n");
1391 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1393 env
->hflags
|= HF_SMM_MASK
;
1394 cpu_smm_update(env
);
1396 sm_state
= env
->smbase
+ 0x8000;
1398 #ifdef TARGET_X86_64
1399 for(i
= 0; i
< 6; i
++) {
1401 offset
= 0x7e00 + i
* 16;
1402 stw_phys(sm_state
+ offset
, dt
->selector
);
1403 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1404 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1405 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1408 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1409 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1411 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1412 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1413 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1414 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1416 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1417 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1419 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1420 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1421 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1422 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1424 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1426 stq_phys(sm_state
+ 0x7ff8, EAX
);
1427 stq_phys(sm_state
+ 0x7ff0, ECX
);
1428 stq_phys(sm_state
+ 0x7fe8, EDX
);
1429 stq_phys(sm_state
+ 0x7fe0, EBX
);
1430 stq_phys(sm_state
+ 0x7fd8, ESP
);
1431 stq_phys(sm_state
+ 0x7fd0, EBP
);
1432 stq_phys(sm_state
+ 0x7fc8, ESI
);
1433 stq_phys(sm_state
+ 0x7fc0, EDI
);
1434 for(i
= 8; i
< 16; i
++)
1435 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1436 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1437 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1438 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1439 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1441 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1442 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1443 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1445 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1446 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1448 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1449 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1450 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1451 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1452 stl_phys(sm_state
+ 0x7fec, EDI
);
1453 stl_phys(sm_state
+ 0x7fe8, ESI
);
1454 stl_phys(sm_state
+ 0x7fe4, EBP
);
1455 stl_phys(sm_state
+ 0x7fe0, ESP
);
1456 stl_phys(sm_state
+ 0x7fdc, EBX
);
1457 stl_phys(sm_state
+ 0x7fd8, EDX
);
1458 stl_phys(sm_state
+ 0x7fd4, ECX
);
1459 stl_phys(sm_state
+ 0x7fd0, EAX
);
1460 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1461 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1463 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1464 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1465 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1466 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1468 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1469 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1470 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1471 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1473 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1474 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1476 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1477 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1479 for(i
= 0; i
< 6; i
++) {
1482 offset
= 0x7f84 + i
* 12;
1484 offset
= 0x7f2c + (i
- 3) * 12;
1485 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1486 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1487 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1488 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1490 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1492 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1493 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1495 /* init SMM cpu state */
1497 #ifdef TARGET_X86_64
1498 cpu_load_efer(env
, 0);
1500 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1501 env
->eip
= 0x00008000;
1502 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1504 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1505 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1506 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1507 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1508 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1510 cpu_x86_update_cr0(env
,
1511 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1512 cpu_x86_update_cr4(env
, 0);
1513 env
->dr
[7] = 0x00000400;
1514 CC_OP
= CC_OP_EFLAGS
;
1517 void helper_rsm(void)
1519 target_ulong sm_state
;
1523 sm_state
= env
->smbase
+ 0x8000;
1524 #ifdef TARGET_X86_64
1525 cpu_load_efer(env
, ldq_phys(sm_state
+ 0x7ed0));
1527 for(i
= 0; i
< 6; i
++) {
1528 offset
= 0x7e00 + i
* 16;
1529 cpu_x86_load_seg_cache(env
, i
,
1530 lduw_phys(sm_state
+ offset
),
1531 ldq_phys(sm_state
+ offset
+ 8),
1532 ldl_phys(sm_state
+ offset
+ 4),
1533 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1536 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1537 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1539 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1540 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1541 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1542 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1544 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1545 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1547 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1548 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1549 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1550 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1552 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1553 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1554 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1555 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1556 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1557 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1558 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1559 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1560 for(i
= 8; i
< 16; i
++)
1561 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1562 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1563 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1564 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1565 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1566 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1568 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1569 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1570 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1572 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1573 if (val
& 0x20000) {
1574 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1577 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1578 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1579 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1580 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1581 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1582 EDI
= ldl_phys(sm_state
+ 0x7fec);
1583 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1584 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1585 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1586 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1587 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1588 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1589 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1590 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1591 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1593 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1594 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1595 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1596 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1598 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1599 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1600 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1601 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1603 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1604 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1606 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1607 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1609 for(i
= 0; i
< 6; i
++) {
1611 offset
= 0x7f84 + i
* 12;
1613 offset
= 0x7f2c + (i
- 3) * 12;
1614 cpu_x86_load_seg_cache(env
, i
,
1615 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1616 ldl_phys(sm_state
+ offset
+ 8),
1617 ldl_phys(sm_state
+ offset
+ 4),
1618 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1620 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1622 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1623 if (val
& 0x20000) {
1624 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1627 CC_OP
= CC_OP_EFLAGS
;
1628 env
->hflags
&= ~HF_SMM_MASK
;
1629 cpu_smm_update(env
);
1631 qemu_log_mask(CPU_LOG_INT
, "SMM: after RSM\n");
1632 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1635 #endif /* !CONFIG_USER_ONLY */
1638 /* division, flags are undefined */
1640 void helper_divb_AL(target_ulong t0
)
1642 unsigned int num
, den
, q
, r
;
1644 num
= (EAX
& 0xffff);
1647 raise_exception(EXCP00_DIVZ
);
1651 raise_exception(EXCP00_DIVZ
);
1653 r
= (num
% den
) & 0xff;
1654 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1657 void helper_idivb_AL(target_ulong t0
)
1664 raise_exception(EXCP00_DIVZ
);
1668 raise_exception(EXCP00_DIVZ
);
1670 r
= (num
% den
) & 0xff;
1671 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1674 void helper_divw_AX(target_ulong t0
)
1676 unsigned int num
, den
, q
, r
;
1678 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1679 den
= (t0
& 0xffff);
1681 raise_exception(EXCP00_DIVZ
);
1685 raise_exception(EXCP00_DIVZ
);
1687 r
= (num
% den
) & 0xffff;
1688 EAX
= (EAX
& ~0xffff) | q
;
1689 EDX
= (EDX
& ~0xffff) | r
;
1692 void helper_idivw_AX(target_ulong t0
)
1696 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1699 raise_exception(EXCP00_DIVZ
);
1702 if (q
!= (int16_t)q
)
1703 raise_exception(EXCP00_DIVZ
);
1705 r
= (num
% den
) & 0xffff;
1706 EAX
= (EAX
& ~0xffff) | q
;
1707 EDX
= (EDX
& ~0xffff) | r
;
1710 void helper_divl_EAX(target_ulong t0
)
1712 unsigned int den
, r
;
1715 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1718 raise_exception(EXCP00_DIVZ
);
1723 raise_exception(EXCP00_DIVZ
);
1728 void helper_idivl_EAX(target_ulong t0
)
1733 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1736 raise_exception(EXCP00_DIVZ
);
1740 if (q
!= (int32_t)q
)
1741 raise_exception(EXCP00_DIVZ
);
1748 /* XXX: exception */
1749 void helper_aam(int base
)
1755 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1759 void helper_aad(int base
)
1763 ah
= (EAX
>> 8) & 0xff;
1764 al
= ((ah
* base
) + al
) & 0xff;
1765 EAX
= (EAX
& ~0xffff) | al
;
1769 void helper_aaa(void)
1775 eflags
= helper_cc_compute_all(CC_OP
);
1778 ah
= (EAX
>> 8) & 0xff;
1780 icarry
= (al
> 0xf9);
1781 if (((al
& 0x0f) > 9 ) || af
) {
1782 al
= (al
+ 6) & 0x0f;
1783 ah
= (ah
+ 1 + icarry
) & 0xff;
1784 eflags
|= CC_C
| CC_A
;
1786 eflags
&= ~(CC_C
| CC_A
);
1789 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1793 void helper_aas(void)
1799 eflags
= helper_cc_compute_all(CC_OP
);
1802 ah
= (EAX
>> 8) & 0xff;
1805 if (((al
& 0x0f) > 9 ) || af
) {
1806 al
= (al
- 6) & 0x0f;
1807 ah
= (ah
- 1 - icarry
) & 0xff;
1808 eflags
|= CC_C
| CC_A
;
1810 eflags
&= ~(CC_C
| CC_A
);
1813 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1817 void helper_daa(void)
1822 eflags
= helper_cc_compute_all(CC_OP
);
1828 if (((al
& 0x0f) > 9 ) || af
) {
1829 al
= (al
+ 6) & 0xff;
1832 if ((al
> 0x9f) || cf
) {
1833 al
= (al
+ 0x60) & 0xff;
1836 EAX
= (EAX
& ~0xff) | al
;
1837 /* well, speed is not an issue here, so we compute the flags by hand */
1838 eflags
|= (al
== 0) << 6; /* zf */
1839 eflags
|= parity_table
[al
]; /* pf */
1840 eflags
|= (al
& 0x80); /* sf */
1844 void helper_das(void)
1846 int al
, al1
, af
, cf
;
1849 eflags
= helper_cc_compute_all(CC_OP
);
1856 if (((al
& 0x0f) > 9 ) || af
) {
1860 al
= (al
- 6) & 0xff;
1862 if ((al1
> 0x99) || cf
) {
1863 al
= (al
- 0x60) & 0xff;
1866 EAX
= (EAX
& ~0xff) | al
;
1867 /* well, speed is not an issue here, so we compute the flags by hand */
1868 eflags
|= (al
== 0) << 6; /* zf */
1869 eflags
|= parity_table
[al
]; /* pf */
1870 eflags
|= (al
& 0x80); /* sf */
1874 void helper_into(int next_eip_addend
)
1877 eflags
= helper_cc_compute_all(CC_OP
);
1878 if (eflags
& CC_O
) {
1879 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1883 void helper_cmpxchg8b(target_ulong a0
)
1888 eflags
= helper_cc_compute_all(CC_OP
);
1890 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1891 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1894 /* always do the store */
1896 EDX
= (uint32_t)(d
>> 32);
1903 #ifdef TARGET_X86_64
1904 void helper_cmpxchg16b(target_ulong a0
)
1909 if ((a0
& 0xf) != 0)
1910 raise_exception(EXCP0D_GPF
);
1911 eflags
= helper_cc_compute_all(CC_OP
);
1914 if (d0
== EAX
&& d1
== EDX
) {
1919 /* always do the store */
1930 void helper_single_step(void)
1932 #ifndef CONFIG_USER_ONLY
1933 check_hw_breakpoints(env
, 1);
1934 env
->dr
[6] |= DR6_BS
;
1936 raise_exception(EXCP01_DB
);
1939 void helper_cpuid(void)
1941 uint32_t eax
, ebx
, ecx
, edx
;
1943 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1945 cpu_x86_cpuid(env
, (uint32_t)EAX
, (uint32_t)ECX
, &eax
, &ebx
, &ecx
, &edx
);
1952 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1955 uint32_t esp_mask
, esp
, ebp
;
1957 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1958 ssp
= env
->segs
[R_SS
].base
;
1967 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1970 stl(ssp
+ (esp
& esp_mask
), t1
);
1977 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1980 stw(ssp
+ (esp
& esp_mask
), t1
);
1984 #ifdef TARGET_X86_64
1985 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
1987 target_ulong esp
, ebp
;
2007 stw(esp
, lduw(ebp
));
2015 void helper_lldt(int selector
)
2019 int index
, entry_limit
;
2023 if ((selector
& 0xfffc) == 0) {
2024 /* XXX: NULL selector case: invalid LDT */
2029 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2031 index
= selector
& ~7;
2032 #ifdef TARGET_X86_64
2033 if (env
->hflags
& HF_LMA_MASK
)
2038 if ((index
+ entry_limit
) > dt
->limit
)
2039 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2040 ptr
= dt
->base
+ index
;
2041 e1
= ldl_kernel(ptr
);
2042 e2
= ldl_kernel(ptr
+ 4);
2043 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2044 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2045 if (!(e2
& DESC_P_MASK
))
2046 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2047 #ifdef TARGET_X86_64
2048 if (env
->hflags
& HF_LMA_MASK
) {
2050 e3
= ldl_kernel(ptr
+ 8);
2051 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2052 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2056 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2059 env
->ldt
.selector
= selector
;
2062 void helper_ltr(int selector
)
2066 int index
, type
, entry_limit
;
2070 if ((selector
& 0xfffc) == 0) {
2071 /* NULL selector case: invalid TR */
2077 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2079 index
= selector
& ~7;
2080 #ifdef TARGET_X86_64
2081 if (env
->hflags
& HF_LMA_MASK
)
2086 if ((index
+ entry_limit
) > dt
->limit
)
2087 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2088 ptr
= dt
->base
+ index
;
2089 e1
= ldl_kernel(ptr
);
2090 e2
= ldl_kernel(ptr
+ 4);
2091 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2092 if ((e2
& DESC_S_MASK
) ||
2093 (type
!= 1 && type
!= 9))
2094 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2095 if (!(e2
& DESC_P_MASK
))
2096 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2097 #ifdef TARGET_X86_64
2098 if (env
->hflags
& HF_LMA_MASK
) {
2100 e3
= ldl_kernel(ptr
+ 8);
2101 e4
= ldl_kernel(ptr
+ 12);
2102 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2103 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2104 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2105 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2109 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2111 e2
|= DESC_TSS_BUSY_MASK
;
2112 stl_kernel(ptr
+ 4, e2
);
2114 env
->tr
.selector
= selector
;
2117 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2118 void helper_load_seg(int seg_reg
, int selector
)
2127 cpl
= env
->hflags
& HF_CPL_MASK
;
2128 if ((selector
& 0xfffc) == 0) {
2129 /* null selector case */
2131 #ifdef TARGET_X86_64
2132 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2135 raise_exception_err(EXCP0D_GPF
, 0);
2136 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2143 index
= selector
& ~7;
2144 if ((index
+ 7) > dt
->limit
)
2145 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2146 ptr
= dt
->base
+ index
;
2147 e1
= ldl_kernel(ptr
);
2148 e2
= ldl_kernel(ptr
+ 4);
2150 if (!(e2
& DESC_S_MASK
))
2151 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2153 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2154 if (seg_reg
== R_SS
) {
2155 /* must be writable segment */
2156 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2157 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2158 if (rpl
!= cpl
|| dpl
!= cpl
)
2159 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2161 /* must be readable segment */
2162 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2163 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2165 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2166 /* if not conforming code, test rights */
2167 if (dpl
< cpl
|| dpl
< rpl
)
2168 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2172 if (!(e2
& DESC_P_MASK
)) {
2173 if (seg_reg
== R_SS
)
2174 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2176 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2179 /* set the access bit if not already set */
2180 if (!(e2
& DESC_A_MASK
)) {
2182 stl_kernel(ptr
+ 4, e2
);
2185 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2186 get_seg_base(e1
, e2
),
2187 get_seg_limit(e1
, e2
),
2190 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2191 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2196 /* protected mode jump */
2197 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2198 int next_eip_addend
)
2201 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2202 target_ulong next_eip
;
2204 if ((new_cs
& 0xfffc) == 0)
2205 raise_exception_err(EXCP0D_GPF
, 0);
2206 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2207 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2208 cpl
= env
->hflags
& HF_CPL_MASK
;
2209 if (e2
& DESC_S_MASK
) {
2210 if (!(e2
& DESC_CS_MASK
))
2211 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2212 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2213 if (e2
& DESC_C_MASK
) {
2214 /* conforming code segment */
2216 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2218 /* non conforming code segment */
2221 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2223 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2225 if (!(e2
& DESC_P_MASK
))
2226 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2227 limit
= get_seg_limit(e1
, e2
);
2228 if (new_eip
> limit
&&
2229 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2230 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2231 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2232 get_seg_base(e1
, e2
), limit
, e2
);
2235 /* jump to call or task gate */
2236 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2238 cpl
= env
->hflags
& HF_CPL_MASK
;
2239 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2241 case 1: /* 286 TSS */
2242 case 9: /* 386 TSS */
2243 case 5: /* task gate */
2244 if (dpl
< cpl
|| dpl
< rpl
)
2245 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2246 next_eip
= env
->eip
+ next_eip_addend
;
2247 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2248 CC_OP
= CC_OP_EFLAGS
;
2250 case 4: /* 286 call gate */
2251 case 12: /* 386 call gate */
2252 if ((dpl
< cpl
) || (dpl
< rpl
))
2253 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2254 if (!(e2
& DESC_P_MASK
))
2255 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2257 new_eip
= (e1
& 0xffff);
2259 new_eip
|= (e2
& 0xffff0000);
2260 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2261 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2262 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2263 /* must be code segment */
2264 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2265 (DESC_S_MASK
| DESC_CS_MASK
)))
2266 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2267 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2268 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2269 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2270 if (!(e2
& DESC_P_MASK
))
2271 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2272 limit
= get_seg_limit(e1
, e2
);
2273 if (new_eip
> limit
)
2274 raise_exception_err(EXCP0D_GPF
, 0);
2275 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2276 get_seg_base(e1
, e2
), limit
, e2
);
2280 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2286 /* real mode call */
2287 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2288 int shift
, int next_eip
)
2291 uint32_t esp
, esp_mask
;
2296 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2297 ssp
= env
->segs
[R_SS
].base
;
2299 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2300 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2302 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2303 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2306 SET_ESP(esp
, esp_mask
);
2308 env
->segs
[R_CS
].selector
= new_cs
;
2309 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2312 /* protected mode call */
2313 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2314 int shift
, int next_eip_addend
)
2317 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2318 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2319 uint32_t val
, limit
, old_sp_mask
;
2320 target_ulong ssp
, old_ssp
, next_eip
;
2322 next_eip
= env
->eip
+ next_eip_addend
;
2323 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2324 LOG_PCALL_STATE(env
);
2325 if ((new_cs
& 0xfffc) == 0)
2326 raise_exception_err(EXCP0D_GPF
, 0);
2327 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2328 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2329 cpl
= env
->hflags
& HF_CPL_MASK
;
2330 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2331 if (e2
& DESC_S_MASK
) {
2332 if (!(e2
& DESC_CS_MASK
))
2333 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2334 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2335 if (e2
& DESC_C_MASK
) {
2336 /* conforming code segment */
2338 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2340 /* non conforming code segment */
2343 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2345 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2347 if (!(e2
& DESC_P_MASK
))
2348 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2350 #ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2356 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2357 PUSHQ(rsp
, next_eip
);
2358 /* from this point, not restartable */
2360 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2361 get_seg_base(e1
, e2
),
2362 get_seg_limit(e1
, e2
), e2
);
2368 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2369 ssp
= env
->segs
[R_SS
].base
;
2371 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2372 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2374 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2375 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2378 limit
= get_seg_limit(e1
, e2
);
2379 if (new_eip
> limit
)
2380 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp
, sp_mask
);
2383 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2384 get_seg_base(e1
, e2
), limit
, e2
);
2388 /* check gate type */
2389 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2390 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl
< cpl
|| dpl
< rpl
)
2397 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2398 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2399 CC_OP
= CC_OP_EFLAGS
;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2405 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2410 if (dpl
< cpl
|| dpl
< rpl
)
2411 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2412 /* check valid bit */
2413 if (!(e2
& DESC_P_MASK
))
2414 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2415 selector
= e1
>> 16;
2416 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2417 param_count
= e2
& 0x1f;
2418 if ((selector
& 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF
, 0);
2421 if (load_segment(&e1
, &e2
, selector
) != 0)
2422 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2423 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2424 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2425 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2427 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2428 if (!(e2
& DESC_P_MASK
))
2429 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2431 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2432 /* to inner privilege */
2433 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2434 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2435 ss
, sp
, param_count
, ESP
);
2436 if ((ss
& 0xfffc) == 0)
2437 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2438 if ((ss
& 3) != dpl
)
2439 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2440 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2441 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2442 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2444 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2445 if (!(ss_e2
& DESC_S_MASK
) ||
2446 (ss_e2
& DESC_CS_MASK
) ||
2447 !(ss_e2
& DESC_W_MASK
))
2448 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2449 if (!(ss_e2
& DESC_P_MASK
))
2450 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2452 // push_size = ((param_count * 2) + 8) << shift;
2454 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2455 old_ssp
= env
->segs
[R_SS
].base
;
2457 sp_mask
= get_sp_mask(ss_e2
);
2458 ssp
= get_seg_base(ss_e1
, ss_e2
);
2460 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2461 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2462 for(i
= param_count
- 1; i
>= 0; i
--) {
2463 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2464 PUSHL(ssp
, sp
, sp_mask
, val
);
2467 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2468 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2469 for(i
= param_count
- 1; i
>= 0; i
--) {
2470 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2471 PUSHW(ssp
, sp
, sp_mask
, val
);
2476 /* to same privilege */
2478 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2479 ssp
= env
->segs
[R_SS
].base
;
2480 // push_size = (4 << shift);
2485 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2486 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2488 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2489 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2492 /* from this point, not restartable */
2495 ss
= (ss
& ~3) | dpl
;
2496 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2498 get_seg_limit(ss_e1
, ss_e2
),
2502 selector
= (selector
& ~3) | dpl
;
2503 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2504 get_seg_base(e1
, e2
),
2505 get_seg_limit(e1
, e2
),
2507 cpu_x86_set_cpl(env
, dpl
);
2508 SET_ESP(sp
, sp_mask
);
2513 /* real and vm86 mode iret */
2514 void helper_iret_real(int shift
)
2516 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2520 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2522 ssp
= env
->segs
[R_SS
].base
;
2525 POPL(ssp
, sp
, sp_mask
, new_eip
);
2526 POPL(ssp
, sp
, sp_mask
, new_cs
);
2528 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2531 POPW(ssp
, sp
, sp_mask
, new_eip
);
2532 POPW(ssp
, sp
, sp_mask
, new_cs
);
2533 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2535 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2536 env
->segs
[R_CS
].selector
= new_cs
;
2537 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2539 if (env
->eflags
& VM_MASK
)
2540 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2542 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2544 eflags_mask
&= 0xffff;
2545 load_eflags(new_eflags
, eflags_mask
);
2546 env
->hflags2
&= ~HF2_NMI_MASK
;
2549 static inline void validate_seg(int seg_reg
, int cpl
)
2554 /* XXX: on x86_64, we do not want to nullify FS and GS because
2555 they may still contain a valid base. I would be interested to
2556 know how a real x86_64 CPU behaves */
2557 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2558 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2561 e2
= env
->segs
[seg_reg
].flags
;
2562 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2563 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2564 /* data or non conforming code segment */
2566 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2571 /* protected mode iret */
2572 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2574 uint32_t new_cs
, new_eflags
, new_ss
;
2575 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2576 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2577 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2578 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2580 #ifdef TARGET_X86_64
2585 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2587 ssp
= env
->segs
[R_SS
].base
;
2588 new_eflags
= 0; /* avoid warning */
2589 #ifdef TARGET_X86_64
2595 POPQ(sp
, new_eflags
);
2601 POPL(ssp
, sp
, sp_mask
, new_eip
);
2602 POPL(ssp
, sp
, sp_mask
, new_cs
);
2605 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2606 if (new_eflags
& VM_MASK
)
2607 goto return_to_vm86
;
2611 POPW(ssp
, sp
, sp_mask
, new_eip
);
2612 POPW(ssp
, sp
, sp_mask
, new_cs
);
2614 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2616 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2617 new_cs
, new_eip
, shift
, addend
);
2618 LOG_PCALL_STATE(env
);
2619 if ((new_cs
& 0xfffc) == 0)
2620 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2621 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2622 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2623 if (!(e2
& DESC_S_MASK
) ||
2624 !(e2
& DESC_CS_MASK
))
2625 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2626 cpl
= env
->hflags
& HF_CPL_MASK
;
2629 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2630 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2631 if (e2
& DESC_C_MASK
) {
2633 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2636 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2638 if (!(e2
& DESC_P_MASK
))
2639 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2642 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2643 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2644 /* return to same privilege level */
2645 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2646 get_seg_base(e1
, e2
),
2647 get_seg_limit(e1
, e2
),
2650 /* return to different privilege level */
2651 #ifdef TARGET_X86_64
2660 POPL(ssp
, sp
, sp_mask
, new_esp
);
2661 POPL(ssp
, sp
, sp_mask
, new_ss
);
2665 POPW(ssp
, sp
, sp_mask
, new_esp
);
2666 POPW(ssp
, sp
, sp_mask
, new_ss
);
2668 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2670 if ((new_ss
& 0xfffc) == 0) {
2671 #ifdef TARGET_X86_64
2672 /* NULL ss is allowed in long mode if cpl != 3*/
2673 /* XXX: test CS64 ? */
2674 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2675 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2677 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2678 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2679 DESC_W_MASK
| DESC_A_MASK
);
2680 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2684 raise_exception_err(EXCP0D_GPF
, 0);
2687 if ((new_ss
& 3) != rpl
)
2688 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2689 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2690 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2691 if (!(ss_e2
& DESC_S_MASK
) ||
2692 (ss_e2
& DESC_CS_MASK
) ||
2693 !(ss_e2
& DESC_W_MASK
))
2694 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2695 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2697 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2698 if (!(ss_e2
& DESC_P_MASK
))
2699 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2700 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2701 get_seg_base(ss_e1
, ss_e2
),
2702 get_seg_limit(ss_e1
, ss_e2
),
2706 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2707 get_seg_base(e1
, e2
),
2708 get_seg_limit(e1
, e2
),
2710 cpu_x86_set_cpl(env
, rpl
);
2712 #ifdef TARGET_X86_64
2713 if (env
->hflags
& HF_CS64_MASK
)
2717 sp_mask
= get_sp_mask(ss_e2
);
2719 /* validate data segments */
2720 validate_seg(R_ES
, rpl
);
2721 validate_seg(R_DS
, rpl
);
2722 validate_seg(R_FS
, rpl
);
2723 validate_seg(R_GS
, rpl
);
2727 SET_ESP(sp
, sp_mask
);
2730 /* NOTE: 'cpl' is the _old_ CPL */
2731 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2733 eflags_mask
|= IOPL_MASK
;
2734 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2736 eflags_mask
|= IF_MASK
;
2738 eflags_mask
&= 0xffff;
2739 load_eflags(new_eflags
, eflags_mask
);
2744 POPL(ssp
, sp
, sp_mask
, new_esp
);
2745 POPL(ssp
, sp
, sp_mask
, new_ss
);
2746 POPL(ssp
, sp
, sp_mask
, new_es
);
2747 POPL(ssp
, sp
, sp_mask
, new_ds
);
2748 POPL(ssp
, sp
, sp_mask
, new_fs
);
2749 POPL(ssp
, sp
, sp_mask
, new_gs
);
2751 /* modify processor state */
2752 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2753 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2754 load_seg_vm(R_CS
, new_cs
& 0xffff);
2755 cpu_x86_set_cpl(env
, 3);
2756 load_seg_vm(R_SS
, new_ss
& 0xffff);
2757 load_seg_vm(R_ES
, new_es
& 0xffff);
2758 load_seg_vm(R_DS
, new_ds
& 0xffff);
2759 load_seg_vm(R_FS
, new_fs
& 0xffff);
2760 load_seg_vm(R_GS
, new_gs
& 0xffff);
2762 env
->eip
= new_eip
& 0xffff;
2766 void helper_iret_protected(int shift
, int next_eip
)
2768 int tss_selector
, type
;
2771 /* specific case for TSS */
2772 if (env
->eflags
& NT_MASK
) {
2773 #ifdef TARGET_X86_64
2774 if (env
->hflags
& HF_LMA_MASK
)
2775 raise_exception_err(EXCP0D_GPF
, 0);
2777 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2778 if (tss_selector
& 4)
2779 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2780 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2781 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2782 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2783 /* NOTE: we check both segment and busy TSS */
2785 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2786 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2788 helper_ret_protected(shift
, 1, 0);
2790 env
->hflags2
&= ~HF2_NMI_MASK
;
2793 void helper_lret_protected(int shift
, int addend
)
2795 helper_ret_protected(shift
, 0, addend
);
2798 void helper_sysenter(void)
2800 if (env
->sysenter_cs
== 0) {
2801 raise_exception_err(EXCP0D_GPF
, 0);
2803 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2804 cpu_x86_set_cpl(env
, 0);
2806 #ifdef TARGET_X86_64
2807 if (env
->hflags
& HF_LMA_MASK
) {
2808 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2810 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2812 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2816 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2818 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2820 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2822 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2824 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2826 DESC_W_MASK
| DESC_A_MASK
);
2827 ESP
= env
->sysenter_esp
;
2828 EIP
= env
->sysenter_eip
;
2831 void helper_sysexit(int dflag
)
2835 cpl
= env
->hflags
& HF_CPL_MASK
;
2836 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2837 raise_exception_err(EXCP0D_GPF
, 0);
2839 cpu_x86_set_cpl(env
, 3);
2840 #ifdef TARGET_X86_64
2842 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) | 3,
2844 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2845 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2846 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2847 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) | 3,
2849 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2850 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2851 DESC_W_MASK
| DESC_A_MASK
);
2855 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2857 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2858 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2859 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2860 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2862 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2863 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2864 DESC_W_MASK
| DESC_A_MASK
);
2870 #if defined(CONFIG_USER_ONLY)
2871 target_ulong
helper_read_crN(int reg
)
2876 void helper_write_crN(int reg
, target_ulong t0
)
2880 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2884 target_ulong
helper_read_crN(int reg
)
2888 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2894 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2895 val
= cpu_get_apic_tpr(env
->apic_state
);
2904 void helper_write_crN(int reg
, target_ulong t0
)
2906 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2909 cpu_x86_update_cr0(env
, t0
);
2912 cpu_x86_update_cr3(env
, t0
);
2915 cpu_x86_update_cr4(env
, t0
);
2918 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2919 cpu_set_apic_tpr(env
->apic_state
, t0
);
2921 env
->v_tpr
= t0
& 0x0f;
2929 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2934 hw_breakpoint_remove(env
, reg
);
2936 hw_breakpoint_insert(env
, reg
);
2937 } else if (reg
== 7) {
2938 for (i
= 0; i
< 4; i
++)
2939 hw_breakpoint_remove(env
, i
);
2941 for (i
= 0; i
< 4; i
++)
2942 hw_breakpoint_insert(env
, i
);
2948 void helper_lmsw(target_ulong t0
)
2950 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2951 if already set to one. */
2952 t0
= (env
->cr
[0] & ~0xe) | (t0
& 0xf);
2953 helper_write_crN(0, t0
);
2956 void helper_clts(void)
2958 env
->cr
[0] &= ~CR0_TS_MASK
;
2959 env
->hflags
&= ~HF_TS_MASK
;
2962 void helper_invlpg(target_ulong addr
)
2964 helper_svm_check_intercept_param(SVM_EXIT_INVLPG
, 0);
2965 tlb_flush_page(env
, addr
);
2968 void helper_rdtsc(void)
2972 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2973 raise_exception(EXCP0D_GPF
);
2975 helper_svm_check_intercept_param(SVM_EXIT_RDTSC
, 0);
2977 val
= cpu_get_tsc(env
) + env
->tsc_offset
;
2978 EAX
= (uint32_t)(val
);
2979 EDX
= (uint32_t)(val
>> 32);
2982 void helper_rdtscp(void)
2985 ECX
= (uint32_t)(env
->tsc_aux
);
2988 void helper_rdpmc(void)
2990 if ((env
->cr
[4] & CR4_PCE_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2991 raise_exception(EXCP0D_GPF
);
2993 helper_svm_check_intercept_param(SVM_EXIT_RDPMC
, 0);
2995 /* currently unimplemented */
2996 raise_exception_err(EXCP06_ILLOP
, 0);
2999 #if defined(CONFIG_USER_ONLY)
3000 void helper_wrmsr(void)
3004 void helper_rdmsr(void)
3008 void helper_wrmsr(void)
3012 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3014 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3016 switch((uint32_t)ECX
) {
3017 case MSR_IA32_SYSENTER_CS
:
3018 env
->sysenter_cs
= val
& 0xffff;
3020 case MSR_IA32_SYSENTER_ESP
:
3021 env
->sysenter_esp
= val
;
3023 case MSR_IA32_SYSENTER_EIP
:
3024 env
->sysenter_eip
= val
;
3026 case MSR_IA32_APICBASE
:
3027 cpu_set_apic_base(env
->apic_state
, val
);
3031 uint64_t update_mask
;
3033 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3034 update_mask
|= MSR_EFER_SCE
;
3035 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3036 update_mask
|= MSR_EFER_LME
;
3037 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3038 update_mask
|= MSR_EFER_FFXSR
;
3039 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3040 update_mask
|= MSR_EFER_NXE
;
3041 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3042 update_mask
|= MSR_EFER_SVME
;
3043 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3044 update_mask
|= MSR_EFER_FFXSR
;
3045 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3046 (val
& update_mask
));
3055 case MSR_VM_HSAVE_PA
:
3056 env
->vm_hsave
= val
;
3058 #ifdef TARGET_X86_64
3069 env
->segs
[R_FS
].base
= val
;
3072 env
->segs
[R_GS
].base
= val
;
3074 case MSR_KERNELGSBASE
:
3075 env
->kernelgsbase
= val
;
3078 case MSR_MTRRphysBase(0):
3079 case MSR_MTRRphysBase(1):
3080 case MSR_MTRRphysBase(2):
3081 case MSR_MTRRphysBase(3):
3082 case MSR_MTRRphysBase(4):
3083 case MSR_MTRRphysBase(5):
3084 case MSR_MTRRphysBase(6):
3085 case MSR_MTRRphysBase(7):
3086 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3088 case MSR_MTRRphysMask(0):
3089 case MSR_MTRRphysMask(1):
3090 case MSR_MTRRphysMask(2):
3091 case MSR_MTRRphysMask(3):
3092 case MSR_MTRRphysMask(4):
3093 case MSR_MTRRphysMask(5):
3094 case MSR_MTRRphysMask(6):
3095 case MSR_MTRRphysMask(7):
3096 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3098 case MSR_MTRRfix64K_00000
:
3099 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3101 case MSR_MTRRfix16K_80000
:
3102 case MSR_MTRRfix16K_A0000
:
3103 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3105 case MSR_MTRRfix4K_C0000
:
3106 case MSR_MTRRfix4K_C8000
:
3107 case MSR_MTRRfix4K_D0000
:
3108 case MSR_MTRRfix4K_D8000
:
3109 case MSR_MTRRfix4K_E0000
:
3110 case MSR_MTRRfix4K_E8000
:
3111 case MSR_MTRRfix4K_F0000
:
3112 case MSR_MTRRfix4K_F8000
:
3113 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3115 case MSR_MTRRdefType
:
3116 env
->mtrr_deftype
= val
;
3118 case MSR_MCG_STATUS
:
3119 env
->mcg_status
= val
;
3122 if ((env
->mcg_cap
& MCG_CTL_P
)
3123 && (val
== 0 || val
== ~(uint64_t)0))
3130 if ((uint32_t)ECX
>= MSR_MC0_CTL
3131 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3132 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3133 if ((offset
& 0x3) != 0
3134 || (val
== 0 || val
== ~(uint64_t)0))
3135 env
->mce_banks
[offset
] = val
;
3138 /* XXX: exception ? */
3143 void helper_rdmsr(void)
3147 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3149 switch((uint32_t)ECX
) {
3150 case MSR_IA32_SYSENTER_CS
:
3151 val
= env
->sysenter_cs
;
3153 case MSR_IA32_SYSENTER_ESP
:
3154 val
= env
->sysenter_esp
;
3156 case MSR_IA32_SYSENTER_EIP
:
3157 val
= env
->sysenter_eip
;
3159 case MSR_IA32_APICBASE
:
3160 val
= cpu_get_apic_base(env
->apic_state
);
3171 case MSR_VM_HSAVE_PA
:
3172 val
= env
->vm_hsave
;
3174 case MSR_IA32_PERF_STATUS
:
3175 /* tsc_increment_by_tick */
3177 /* CPU multiplier */
3178 val
|= (((uint64_t)4ULL) << 40);
3180 #ifdef TARGET_X86_64
3191 val
= env
->segs
[R_FS
].base
;
3194 val
= env
->segs
[R_GS
].base
;
3196 case MSR_KERNELGSBASE
:
3197 val
= env
->kernelgsbase
;
3203 case MSR_MTRRphysBase(0):
3204 case MSR_MTRRphysBase(1):
3205 case MSR_MTRRphysBase(2):
3206 case MSR_MTRRphysBase(3):
3207 case MSR_MTRRphysBase(4):
3208 case MSR_MTRRphysBase(5):
3209 case MSR_MTRRphysBase(6):
3210 case MSR_MTRRphysBase(7):
3211 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3213 case MSR_MTRRphysMask(0):
3214 case MSR_MTRRphysMask(1):
3215 case MSR_MTRRphysMask(2):
3216 case MSR_MTRRphysMask(3):
3217 case MSR_MTRRphysMask(4):
3218 case MSR_MTRRphysMask(5):
3219 case MSR_MTRRphysMask(6):
3220 case MSR_MTRRphysMask(7):
3221 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3223 case MSR_MTRRfix64K_00000
:
3224 val
= env
->mtrr_fixed
[0];
3226 case MSR_MTRRfix16K_80000
:
3227 case MSR_MTRRfix16K_A0000
:
3228 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3230 case MSR_MTRRfix4K_C0000
:
3231 case MSR_MTRRfix4K_C8000
:
3232 case MSR_MTRRfix4K_D0000
:
3233 case MSR_MTRRfix4K_D8000
:
3234 case MSR_MTRRfix4K_E0000
:
3235 case MSR_MTRRfix4K_E8000
:
3236 case MSR_MTRRfix4K_F0000
:
3237 case MSR_MTRRfix4K_F8000
:
3238 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3240 case MSR_MTRRdefType
:
3241 val
= env
->mtrr_deftype
;
3244 if (env
->cpuid_features
& CPUID_MTRR
)
3245 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3247 /* XXX: exception ? */
3254 if (env
->mcg_cap
& MCG_CTL_P
)
3259 case MSR_MCG_STATUS
:
3260 val
= env
->mcg_status
;
3263 if ((uint32_t)ECX
>= MSR_MC0_CTL
3264 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3265 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3266 val
= env
->mce_banks
[offset
];
3269 /* XXX: exception ? */
3273 EAX
= (uint32_t)(val
);
3274 EDX
= (uint32_t)(val
>> 32);
3278 target_ulong
helper_lsl(target_ulong selector1
)
3281 uint32_t e1
, e2
, eflags
, selector
;
3282 int rpl
, dpl
, cpl
, type
;
3284 selector
= selector1
& 0xffff;
3285 eflags
= helper_cc_compute_all(CC_OP
);
3286 if ((selector
& 0xfffc) == 0)
3288 if (load_segment(&e1
, &e2
, selector
) != 0)
3291 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3292 cpl
= env
->hflags
& HF_CPL_MASK
;
3293 if (e2
& DESC_S_MASK
) {
3294 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3297 if (dpl
< cpl
|| dpl
< rpl
)
3301 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3312 if (dpl
< cpl
|| dpl
< rpl
) {
3314 CC_SRC
= eflags
& ~CC_Z
;
3318 limit
= get_seg_limit(e1
, e2
);
3319 CC_SRC
= eflags
| CC_Z
;
3323 target_ulong
helper_lar(target_ulong selector1
)
3325 uint32_t e1
, e2
, eflags
, selector
;
3326 int rpl
, dpl
, cpl
, type
;
3328 selector
= selector1
& 0xffff;
3329 eflags
= helper_cc_compute_all(CC_OP
);
3330 if ((selector
& 0xfffc) == 0)
3332 if (load_segment(&e1
, &e2
, selector
) != 0)
3335 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3336 cpl
= env
->hflags
& HF_CPL_MASK
;
3337 if (e2
& DESC_S_MASK
) {
3338 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3341 if (dpl
< cpl
|| dpl
< rpl
)
3345 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3359 if (dpl
< cpl
|| dpl
< rpl
) {
3361 CC_SRC
= eflags
& ~CC_Z
;
3365 CC_SRC
= eflags
| CC_Z
;
3366 return e2
& 0x00f0ff00;
3369 void helper_verr(target_ulong selector1
)
3371 uint32_t e1
, e2
, eflags
, selector
;
3374 selector
= selector1
& 0xffff;
3375 eflags
= helper_cc_compute_all(CC_OP
);
3376 if ((selector
& 0xfffc) == 0)
3378 if (load_segment(&e1
, &e2
, selector
) != 0)
3380 if (!(e2
& DESC_S_MASK
))
3383 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3384 cpl
= env
->hflags
& HF_CPL_MASK
;
3385 if (e2
& DESC_CS_MASK
) {
3386 if (!(e2
& DESC_R_MASK
))
3388 if (!(e2
& DESC_C_MASK
)) {
3389 if (dpl
< cpl
|| dpl
< rpl
)
3393 if (dpl
< cpl
|| dpl
< rpl
) {
3395 CC_SRC
= eflags
& ~CC_Z
;
3399 CC_SRC
= eflags
| CC_Z
;
3402 void helper_verw(target_ulong selector1
)
3404 uint32_t e1
, e2
, eflags
, selector
;
3407 selector
= selector1
& 0xffff;
3408 eflags
= helper_cc_compute_all(CC_OP
);
3409 if ((selector
& 0xfffc) == 0)
3411 if (load_segment(&e1
, &e2
, selector
) != 0)
3413 if (!(e2
& DESC_S_MASK
))
3416 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3417 cpl
= env
->hflags
& HF_CPL_MASK
;
3418 if (e2
& DESC_CS_MASK
) {
3421 if (dpl
< cpl
|| dpl
< rpl
)
3423 if (!(e2
& DESC_W_MASK
)) {
3425 CC_SRC
= eflags
& ~CC_Z
;
3429 CC_SRC
= eflags
| CC_Z
;
3432 /* x87 FPU helpers */
3434 static inline double CPU86_LDouble_to_double(CPU86_LDouble a
)
3441 u
.f64
= floatx_to_float64(a
, &env
->fp_status
);
3445 static inline CPU86_LDouble
double_to_CPU86_LDouble(double a
)
3453 return float64_to_floatx(u
.f64
, &env
->fp_status
);
3456 static void fpu_set_exception(int mask
)
3459 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3460 env
->fpus
|= FPUS_SE
| FPUS_B
;
3463 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3465 if (floatx_is_zero(b
)) {
3466 fpu_set_exception(FPUS_ZE
);
3468 return floatx_div(a
, b
, &env
->fp_status
);
3471 static void fpu_raise_exception(void)
3473 if (env
->cr
[0] & CR0_NE_MASK
) {
3474 raise_exception(EXCP10_COPR
);
3476 #if !defined(CONFIG_USER_ONLY)
3483 void helper_flds_FT0(uint32_t val
)
3490 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3493 void helper_fldl_FT0(uint64_t val
)
3500 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3503 void helper_fildl_FT0(int32_t val
)
3505 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3508 void helper_flds_ST0(uint32_t val
)
3515 new_fpstt
= (env
->fpstt
- 1) & 7;
3517 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3518 env
->fpstt
= new_fpstt
;
3519 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3522 void helper_fldl_ST0(uint64_t val
)
3529 new_fpstt
= (env
->fpstt
- 1) & 7;
3531 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3532 env
->fpstt
= new_fpstt
;
3533 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3536 void helper_fildl_ST0(int32_t val
)
3539 new_fpstt
= (env
->fpstt
- 1) & 7;
3540 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3541 env
->fpstt
= new_fpstt
;
3542 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3545 void helper_fildll_ST0(int64_t val
)
3548 new_fpstt
= (env
->fpstt
- 1) & 7;
3549 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3550 env
->fpstt
= new_fpstt
;
3551 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3554 uint32_t helper_fsts_ST0(void)
3560 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3564 uint64_t helper_fstl_ST0(void)
3570 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3574 int32_t helper_fist_ST0(void)
3577 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3578 if (val
!= (int16_t)val
)
3583 int32_t helper_fistl_ST0(void)
3586 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3590 int64_t helper_fistll_ST0(void)
3593 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3597 int32_t helper_fistt_ST0(void)
3600 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3601 if (val
!= (int16_t)val
)
3606 int32_t helper_fisttl_ST0(void)
3609 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3613 int64_t helper_fisttll_ST0(void)
3616 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3620 void helper_fldt_ST0(target_ulong ptr
)
3623 new_fpstt
= (env
->fpstt
- 1) & 7;
3624 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3625 env
->fpstt
= new_fpstt
;
3626 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3629 void helper_fstt_ST0(target_ulong ptr
)
3631 helper_fstt(ST0
, ptr
);
3634 void helper_fpush(void)
3639 void helper_fpop(void)
3644 void helper_fdecstp(void)
3646 env
->fpstt
= (env
->fpstt
- 1) & 7;
3647 env
->fpus
&= (~0x4700);
3650 void helper_fincstp(void)
3652 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3653 env
->fpus
&= (~0x4700);
3658 void helper_ffree_STN(int st_index
)
3660 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3663 void helper_fmov_ST0_FT0(void)
3668 void helper_fmov_FT0_STN(int st_index
)
3673 void helper_fmov_ST0_STN(int st_index
)
3678 void helper_fmov_STN_ST0(int st_index
)
3683 void helper_fxchg_ST0_STN(int st_index
)
3691 /* FPU operations */
3693 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3695 void helper_fcom_ST0_FT0(void)
3699 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3700 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3703 void helper_fucom_ST0_FT0(void)
3707 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3708 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3711 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3713 void helper_fcomi_ST0_FT0(void)
3718 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3719 eflags
= helper_cc_compute_all(CC_OP
);
3720 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3724 void helper_fucomi_ST0_FT0(void)
3729 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3730 eflags
= helper_cc_compute_all(CC_OP
);
3731 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3735 void helper_fadd_ST0_FT0(void)
3737 ST0
= floatx_add(ST0
, FT0
, &env
->fp_status
);
3740 void helper_fmul_ST0_FT0(void)
3742 ST0
= floatx_mul(ST0
, FT0
, &env
->fp_status
);
3745 void helper_fsub_ST0_FT0(void)
3747 ST0
= floatx_sub(ST0
, FT0
, &env
->fp_status
);
3750 void helper_fsubr_ST0_FT0(void)
3752 ST0
= floatx_sub(FT0
, ST0
, &env
->fp_status
);
3755 void helper_fdiv_ST0_FT0(void)
3757 ST0
= helper_fdiv(ST0
, FT0
);
3760 void helper_fdivr_ST0_FT0(void)
3762 ST0
= helper_fdiv(FT0
, ST0
);
3765 /* fp operations between STN and ST0 */
3767 void helper_fadd_STN_ST0(int st_index
)
3769 ST(st_index
) = floatx_add(ST(st_index
), ST0
, &env
->fp_status
);
3772 void helper_fmul_STN_ST0(int st_index
)
3774 ST(st_index
) = floatx_mul(ST(st_index
), ST0
, &env
->fp_status
);
3777 void helper_fsub_STN_ST0(int st_index
)
3779 ST(st_index
) = floatx_sub(ST(st_index
), ST0
, &env
->fp_status
);
3782 void helper_fsubr_STN_ST0(int st_index
)
3784 ST(st_index
) = floatx_sub(ST0
, ST(st_index
), &env
->fp_status
);
3787 void helper_fdiv_STN_ST0(int st_index
)
3791 *p
= helper_fdiv(*p
, ST0
);
3794 void helper_fdivr_STN_ST0(int st_index
)
3798 *p
= helper_fdiv(ST0
, *p
);
3801 /* misc FPU operations */
3802 void helper_fchs_ST0(void)
3804 ST0
= floatx_chs(ST0
);
3807 void helper_fabs_ST0(void)
3809 ST0
= floatx_abs(ST0
);
3812 void helper_fld1_ST0(void)
3817 void helper_fldl2t_ST0(void)
3822 void helper_fldl2e_ST0(void)
3827 void helper_fldpi_ST0(void)
3832 void helper_fldlg2_ST0(void)
3837 void helper_fldln2_ST0(void)
3842 void helper_fldz_ST0(void)
3847 void helper_fldz_FT0(void)
3852 uint32_t helper_fnstsw(void)
3854 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3857 uint32_t helper_fnstcw(void)
3862 static void update_fp_status(void)
3866 /* set rounding mode */
3867 switch(env
->fpuc
& RC_MASK
) {
3870 rnd_type
= float_round_nearest_even
;
3873 rnd_type
= float_round_down
;
3876 rnd_type
= float_round_up
;
3879 rnd_type
= float_round_to_zero
;
3882 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3884 switch((env
->fpuc
>> 8) & 3) {
3896 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3900 void helper_fldcw(uint32_t val
)
3906 void helper_fclex(void)
3908 env
->fpus
&= 0x7f00;
3911 void helper_fwait(void)
3913 if (env
->fpus
& FPUS_SE
)
3914 fpu_raise_exception();
3917 void helper_fninit(void)
3934 void helper_fbld_ST0(target_ulong ptr
)
3942 for(i
= 8; i
>= 0; i
--) {
3944 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3946 tmp
= int64_to_floatx(val
, &env
->fp_status
);
3947 if (ldub(ptr
+ 9) & 0x80) {
3954 void helper_fbst_ST0(target_ulong ptr
)
3957 target_ulong mem_ref
, mem_end
;
3960 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3962 mem_end
= mem_ref
+ 9;
3969 while (mem_ref
< mem_end
) {
3974 v
= ((v
/ 10) << 4) | (v
% 10);
3977 while (mem_ref
< mem_end
) {
3982 void helper_f2xm1(void)
3984 ST0
= pow(2.0,ST0
) - 1.0;
3987 void helper_fyl2x(void)
3989 CPU86_LDouble fptemp
;
3993 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3997 env
->fpus
&= (~0x4700);
4002 void helper_fptan(void)
4004 CPU86_LDouble fptemp
;
4007 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4013 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4014 /* the above code is for |arg| < 2**52 only */
4018 void helper_fpatan(void)
4020 CPU86_LDouble fptemp
, fpsrcop
;
4024 ST1
= atan2(fpsrcop
,fptemp
);
4028 void helper_fxtract(void)
4030 CPU86_LDoubleU temp
;
4034 if (floatx_is_zero(ST0
)) {
4035 /* Easy way to generate -inf and raising division by 0 exception */
4036 ST0
= floatx_div(floatx_chs(floatx_one
), floatx_zero
, &env
->fp_status
);
4042 expdif
= EXPD(temp
) - EXPBIAS
;
4043 /*DP exponent bias*/
4044 ST0
= int32_to_floatx(expdif
, &env
->fp_status
);
4051 void helper_fprem1(void)
4053 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4054 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4056 signed long long int q
;
4058 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4059 ST0
= 0.0 / 0.0; /* NaN */
4060 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4066 fpsrcop1
.d
= fpsrcop
;
4068 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4071 /* optimisation? taken from the AMD docs */
4072 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4073 /* ST0 is unchanged */
4078 dblq
= fpsrcop
/ fptemp
;
4079 /* round dblq towards nearest integer */
4081 ST0
= fpsrcop
- fptemp
* dblq
;
4083 /* convert dblq to q by truncating towards zero */
4085 q
= (signed long long int)(-dblq
);
4087 q
= (signed long long int)dblq
;
4089 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4090 /* (C0,C3,C1) <-- (q2,q1,q0) */
4091 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4092 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4093 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4095 env
->fpus
|= 0x400; /* C2 <-- 1 */
4096 fptemp
= pow(2.0, expdif
- 50);
4097 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4098 /* fpsrcop = integer obtained by chopping */
4099 fpsrcop
= (fpsrcop
< 0.0) ?
4100 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4101 ST0
-= (ST1
* fpsrcop
* fptemp
);
4105 void helper_fprem(void)
4107 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4108 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4110 signed long long int q
;
4112 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4113 ST0
= 0.0 / 0.0; /* NaN */
4114 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4118 fpsrcop
= (CPU86_LDouble
)ST0
;
4119 fptemp
= (CPU86_LDouble
)ST1
;
4120 fpsrcop1
.d
= fpsrcop
;
4122 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4125 /* optimisation? taken from the AMD docs */
4126 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4127 /* ST0 is unchanged */
4131 if ( expdif
< 53 ) {
4132 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4133 /* round dblq towards zero */
4134 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4135 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4137 /* convert dblq to q by truncating towards zero */
4139 q
= (signed long long int)(-dblq
);
4141 q
= (signed long long int)dblq
;
4143 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4144 /* (C0,C3,C1) <-- (q2,q1,q0) */
4145 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4146 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4147 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4149 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4150 env
->fpus
|= 0x400; /* C2 <-- 1 */
4151 fptemp
= pow(2.0, (double)(expdif
- N
));
4152 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4153 /* fpsrcop = integer obtained by chopping */
4154 fpsrcop
= (fpsrcop
< 0.0) ?
4155 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4156 ST0
-= (ST1
* fpsrcop
* fptemp
);
4160 void helper_fyl2xp1(void)
4162 CPU86_LDouble fptemp
;
4165 if ((fptemp
+1.0)>0.0) {
4166 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4170 env
->fpus
&= (~0x4700);
4175 void helper_fsqrt(void)
4177 if (floatx_is_neg(ST0
)) {
4178 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4181 ST0
= floatx_sqrt(ST0
, &env
->fp_status
);
4184 void helper_fsincos(void)
4186 CPU86_LDouble fptemp
;
4189 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4195 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4196 /* the above code is for |arg| < 2**63 only */
4200 void helper_frndint(void)
4202 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4205 void helper_fscale(void)
4207 if (floatx_is_any_nan(ST1
)) {
4210 int n
= floatx_to_int32_round_to_zero(ST1
, &env
->fp_status
);
4211 ST0
= floatx_scalbn(ST0
, n
, &env
->fp_status
);
4215 void helper_fsin(void)
4217 CPU86_LDouble fptemp
;
4220 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4224 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4225 /* the above code is for |arg| < 2**53 only */
4229 void helper_fcos(void)
4231 CPU86_LDouble fptemp
;
4234 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4238 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4239 /* the above code is for |arg5 < 2**63 only */
4243 void helper_fxam_ST0(void)
4245 CPU86_LDoubleU temp
;
4250 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4252 env
->fpus
|= 0x200; /* C1 <-- 1 */
4254 /* XXX: test fptags too */
4255 expdif
= EXPD(temp
);
4256 if (expdif
== MAXEXPD
) {
4257 #ifdef USE_X86LDOUBLE
4258 if (MANTD(temp
) == 0x8000000000000000ULL
)
4260 if (MANTD(temp
) == 0)
4262 env
->fpus
|= 0x500 /*Infinity*/;
4264 env
->fpus
|= 0x100 /*NaN*/;
4265 } else if (expdif
== 0) {
4266 if (MANTD(temp
) == 0)
4267 env
->fpus
|= 0x4000 /*Zero*/;
4269 env
->fpus
|= 0x4400 /*Denormal*/;
4275 void helper_fstenv(target_ulong ptr
, int data32
)
4277 int fpus
, fptag
, exp
, i
;
4281 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4283 for (i
=7; i
>=0; i
--) {
4285 if (env
->fptags
[i
]) {
4288 tmp
.d
= env
->fpregs
[i
].d
;
4291 if (exp
== 0 && mant
== 0) {
4294 } else if (exp
== 0 || exp
== MAXEXPD
4295 #ifdef USE_X86LDOUBLE
4296 || (mant
& (1LL << 63)) == 0
4299 /* NaNs, infinity, denormal */
4306 stl(ptr
, env
->fpuc
);
4308 stl(ptr
+ 8, fptag
);
4309 stl(ptr
+ 12, 0); /* fpip */
4310 stl(ptr
+ 16, 0); /* fpcs */
4311 stl(ptr
+ 20, 0); /* fpoo */
4312 stl(ptr
+ 24, 0); /* fpos */
4315 stw(ptr
, env
->fpuc
);
4317 stw(ptr
+ 4, fptag
);
4325 void helper_fldenv(target_ulong ptr
, int data32
)
4330 env
->fpuc
= lduw(ptr
);
4331 fpus
= lduw(ptr
+ 4);
4332 fptag
= lduw(ptr
+ 8);
4335 env
->fpuc
= lduw(ptr
);
4336 fpus
= lduw(ptr
+ 2);
4337 fptag
= lduw(ptr
+ 4);
4339 env
->fpstt
= (fpus
>> 11) & 7;
4340 env
->fpus
= fpus
& ~0x3800;
4341 for(i
= 0;i
< 8; i
++) {
4342 env
->fptags
[i
] = ((fptag
& 3) == 3);
4347 void helper_fsave(target_ulong ptr
, int data32
)
4352 helper_fstenv(ptr
, data32
);
4354 ptr
+= (14 << data32
);
4355 for(i
= 0;i
< 8; i
++) {
4357 helper_fstt(tmp
, ptr
);
4375 void helper_frstor(target_ulong ptr
, int data32
)
4380 helper_fldenv(ptr
, data32
);
4381 ptr
+= (14 << data32
);
4383 for(i
= 0;i
< 8; i
++) {
4384 tmp
= helper_fldt(ptr
);
4390 void helper_fxsave(target_ulong ptr
, int data64
)
4392 int fpus
, fptag
, i
, nb_xmm_regs
;
4396 /* The operand must be 16 byte aligned */
4398 raise_exception(EXCP0D_GPF
);
4401 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4403 for(i
= 0; i
< 8; i
++) {
4404 fptag
|= (env
->fptags
[i
] << i
);
4406 stw(ptr
, env
->fpuc
);
4408 stw(ptr
+ 4, fptag
^ 0xff);
4409 #ifdef TARGET_X86_64
4411 stq(ptr
+ 0x08, 0); /* rip */
4412 stq(ptr
+ 0x10, 0); /* rdp */
4416 stl(ptr
+ 0x08, 0); /* eip */
4417 stl(ptr
+ 0x0c, 0); /* sel */
4418 stl(ptr
+ 0x10, 0); /* dp */
4419 stl(ptr
+ 0x14, 0); /* sel */
4423 for(i
= 0;i
< 8; i
++) {
4425 helper_fstt(tmp
, addr
);
4429 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4430 /* XXX: finish it */
4431 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4432 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4433 if (env
->hflags
& HF_CS64_MASK
)
4438 /* Fast FXSAVE leaves out the XMM registers */
4439 if (!(env
->efer
& MSR_EFER_FFXSR
)
4440 || (env
->hflags
& HF_CPL_MASK
)
4441 || !(env
->hflags
& HF_LMA_MASK
)) {
4442 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4443 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4444 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4451 void helper_fxrstor(target_ulong ptr
, int data64
)
4453 int i
, fpus
, fptag
, nb_xmm_regs
;
4457 /* The operand must be 16 byte aligned */
4459 raise_exception(EXCP0D_GPF
);
4462 env
->fpuc
= lduw(ptr
);
4463 fpus
= lduw(ptr
+ 2);
4464 fptag
= lduw(ptr
+ 4);
4465 env
->fpstt
= (fpus
>> 11) & 7;
4466 env
->fpus
= fpus
& ~0x3800;
4468 for(i
= 0;i
< 8; i
++) {
4469 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4473 for(i
= 0;i
< 8; i
++) {
4474 tmp
= helper_fldt(addr
);
4479 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4480 /* XXX: finish it */
4481 env
->mxcsr
= ldl(ptr
+ 0x18);
4483 if (env
->hflags
& HF_CS64_MASK
)
4488 /* Fast FXRESTORE leaves out the XMM registers */
4489 if (!(env
->efer
& MSR_EFER_FFXSR
)
4490 || (env
->hflags
& HF_CPL_MASK
)
4491 || !(env
->hflags
& HF_LMA_MASK
)) {
4492 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4493 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4494 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4501 #ifndef USE_X86LDOUBLE
4503 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4505 CPU86_LDoubleU temp
;
4510 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4511 /* exponent + sign */
4512 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4513 e
|= SIGND(temp
) >> 16;
4517 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4519 CPU86_LDoubleU temp
;
4523 /* XXX: handle overflow ? */
4524 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4525 e
|= (upper
>> 4) & 0x800; /* sign */
4526 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4528 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4531 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4538 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4540 CPU86_LDoubleU temp
;
4543 *pmant
= temp
.l
.lower
;
4544 *pexp
= temp
.l
.upper
;
4547 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4549 CPU86_LDoubleU temp
;
4551 temp
.l
.upper
= upper
;
4552 temp
.l
.lower
= mant
;
4557 #ifdef TARGET_X86_64
4559 //#define DEBUG_MULDIV
4561 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4570 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4574 add128(plow
, phigh
, 1, 0);
4577 /* return TRUE if overflow */
4578 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4580 uint64_t q
, r
, a1
, a0
;
4593 /* XXX: use a better algorithm */
4594 for(i
= 0; i
< 64; i
++) {
4596 a1
= (a1
<< 1) | (a0
>> 63);
4597 if (ab
|| a1
>= b
) {
4603 a0
= (a0
<< 1) | qb
;
4605 #if defined(DEBUG_MULDIV)
4606 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4607 *phigh
, *plow
, b
, a0
, a1
);
4615 /* return TRUE if overflow */
4616 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4619 sa
= ((int64_t)*phigh
< 0);
4621 neg128(plow
, phigh
);
4625 if (div64(plow
, phigh
, b
) != 0)
4628 if (*plow
> (1ULL << 63))
4632 if (*plow
>= (1ULL << 63))
4640 void helper_mulq_EAX_T0(target_ulong t0
)
4644 mulu64(&r0
, &r1
, EAX
, t0
);
4651 void helper_imulq_EAX_T0(target_ulong t0
)
4655 muls64(&r0
, &r1
, EAX
, t0
);
4659 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4662 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4666 muls64(&r0
, &r1
, t0
, t1
);
4668 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4672 void helper_divq_EAX(target_ulong t0
)
4676 raise_exception(EXCP00_DIVZ
);
4680 if (div64(&r0
, &r1
, t0
))
4681 raise_exception(EXCP00_DIVZ
);
4686 void helper_idivq_EAX(target_ulong t0
)
4690 raise_exception(EXCP00_DIVZ
);
4694 if (idiv64(&r0
, &r1
, t0
))
4695 raise_exception(EXCP00_DIVZ
);
4701 static void do_hlt(void)
4703 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4705 env
->exception_index
= EXCP_HLT
;
4709 void helper_hlt(int next_eip_addend
)
4711 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4712 EIP
+= next_eip_addend
;
4717 void helper_monitor(target_ulong ptr
)
4719 if ((uint32_t)ECX
!= 0)
4720 raise_exception(EXCP0D_GPF
);
4721 /* XXX: store address ? */
4722 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4725 void helper_mwait(int next_eip_addend
)
4727 if ((uint32_t)ECX
!= 0)
4728 raise_exception(EXCP0D_GPF
);
4729 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4730 EIP
+= next_eip_addend
;
4732 /* XXX: not complete but not completely erroneous */
4733 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4734 /* more than one CPU: do not sleep because another CPU may
4741 void helper_debug(void)
4743 env
->exception_index
= EXCP_DEBUG
;
4747 void helper_reset_rf(void)
4749 env
->eflags
&= ~RF_MASK
;
/* Raise a software interrupt (is_int = 1, error_code = 0) at the
   instruction following the current one. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
/* Thin wrapper so generated code can raise a CPU exception. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4762 void helper_cli(void)
4764 env
->eflags
&= ~IF_MASK
;
4767 void helper_sti(void)
4769 env
->eflags
|= IF_MASK
;
4773 /* vm86plus instructions */
4774 void helper_cli_vm(void)
4776 env
->eflags
&= ~VIF_MASK
;
4779 void helper_sti_vm(void)
4781 env
->eflags
|= VIF_MASK
;
4782 if (env
->eflags
& VIP_MASK
) {
4783 raise_exception(EXCP0D_GPF
);
4788 void helper_set_inhibit_irq(void)
4790 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4793 void helper_reset_inhibit_irq(void)
4795 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4798 void helper_boundw(target_ulong a0
, int v
)
4802 high
= ldsw(a0
+ 2);
4804 if (v
< low
|| v
> high
) {
4805 raise_exception(EXCP05_BOUND
);
4809 void helper_boundl(target_ulong a0
, int v
)
4814 if (v
< low
|| v
> high
) {
4815 raise_exception(EXCP05_BOUND
);
4819 #if !defined(CONFIG_USER_ONLY)
4821 #define MMUSUFFIX _mmu
4824 #include "softmmu_template.h"
4827 #include "softmmu_template.h"
4830 #include "softmmu_template.h"
4833 #include "softmmu_template.h"
4837 #if !defined(CONFIG_USER_ONLY)
4838 /* try to fill the TLB and return an exception if error. If retaddr is
4839 NULL, it means that the function was called in C code (i.e. not
4840 from generated code or from helper.c) */
4841 /* XXX: fix it to restore all registers */
4842 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4844 TranslationBlock
*tb
;
4847 CPUX86State
*saved_env
;
4849 /* XXX: hack to restore env in all cases, even if not called from
4852 env
= cpu_single_env
;
4854 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4857 /* now we have a real cpu fault */
4858 pc
= (unsigned long)retaddr
;
4859 tb
= tb_find_pc(pc
);
4861 /* the PC is inside the translated code. It means that we have
4862 a virtual CPU fault */
4863 cpu_restore_state(tb
, env
, pc
);
4866 raise_exception_err(env
->exception_index
, env
->error_code
);
4872 /* Secure Virtual Machine helpers */
4874 #if defined(CONFIG_USER_ONLY)
4876 void helper_vmrun(int aflag
, int next_eip_addend
)
4879 void helper_vmmcall(void)
4882 void helper_vmload(int aflag
)
4885 void helper_vmsave(int aflag
)
4888 void helper_stgi(void)
4891 void helper_clgi(void)
4894 void helper_skinit(void)
4897 void helper_invlpga(int aflag
)
4900 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4903 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4907 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4908 uint32_t next_eip_addend
)
4913 static inline void svm_save_seg(target_phys_addr_t addr
,
4914 const SegmentCache
*sc
)
4916 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4918 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4920 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4922 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4923 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
4926 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4930 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4931 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4932 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4933 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4934 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4937 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4938 CPUState
*env
, int seg_reg
)
4940 SegmentCache sc1
, *sc
= &sc1
;
4941 svm_load_seg(addr
, sc
);
4942 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4943 sc
->base
, sc
->limit
, sc
->flags
);
4946 void helper_vmrun(int aflag
, int next_eip_addend
)
4952 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4957 addr
= (uint32_t)EAX
;
4959 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4961 env
->vm_vmcb
= addr
;
4963 /* save the current CPU state in the hsave page */
4964 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4965 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4967 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4968 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4970 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4971 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4972 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4973 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4974 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4975 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4977 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4978 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4980 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4982 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4984 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4986 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4989 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4990 EIP
+ next_eip_addend
);
4991 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4992 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4994 /* load the interception bitmaps so we do not need to access the
4996 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4997 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4998 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4999 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
5000 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
5001 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
5003 /* enable intercepts */
5004 env
->hflags
|= HF_SVMI_MASK
;
5006 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
5008 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
5009 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
5011 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
5012 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
5014 /* clear exit_info_2 so we behave like the real hardware */
5015 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
5017 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
5018 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
5019 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
5020 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
5021 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
5022 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
5023 if (int_ctl
& V_INTR_MASKING_MASK
) {
5024 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
5025 env
->hflags2
|= HF2_VINTR_MASK
;
5026 if (env
->eflags
& IF_MASK
)
5027 env
->hflags2
|= HF2_HIF_MASK
;
5031 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
5033 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
5034 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5035 CC_OP
= CC_OP_EFLAGS
;
5037 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5039 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5041 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5043 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5046 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
5048 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
5049 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
5050 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
5051 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
5052 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
5054 /* FIXME: guest state consistency checks */
5056 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
5057 case TLB_CONTROL_DO_NOTHING
:
5059 case TLB_CONTROL_FLUSH_ALL_ASID
:
5060 /* FIXME: this is not 100% correct but should work for now */
5065 env
->hflags2
|= HF2_GIF_MASK
;
5067 if (int_ctl
& V_IRQ_MASK
) {
5068 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
5071 /* maybe we need to inject an event */
5072 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
5073 if (event_inj
& SVM_EVTINJ_VALID
) {
5074 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
5075 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
5076 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
5078 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
5079 /* FIXME: need to implement valid_err */
5080 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5081 case SVM_EVTINJ_TYPE_INTR
:
5082 env
->exception_index
= vector
;
5083 env
->error_code
= event_inj_err
;
5084 env
->exception_is_int
= 0;
5085 env
->exception_next_eip
= -1;
5086 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5087 /* XXX: is it always correct ? */
5088 do_interrupt(vector
, 0, 0, 0, 1);
5090 case SVM_EVTINJ_TYPE_NMI
:
5091 env
->exception_index
= EXCP02_NMI
;
5092 env
->error_code
= event_inj_err
;
5093 env
->exception_is_int
= 0;
5094 env
->exception_next_eip
= EIP
;
5095 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5098 case SVM_EVTINJ_TYPE_EXEPT
:
5099 env
->exception_index
= vector
;
5100 env
->error_code
= event_inj_err
;
5101 env
->exception_is_int
= 0;
5102 env
->exception_next_eip
= -1;
5103 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5106 case SVM_EVTINJ_TYPE_SOFT
:
5107 env
->exception_index
= vector
;
5108 env
->error_code
= event_inj_err
;
5109 env
->exception_is_int
= 1;
5110 env
->exception_next_eip
= EIP
;
5111 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5115 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5119 void helper_vmmcall(void)
5121 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5122 raise_exception(EXCP06_ILLOP
);
5125 void helper_vmload(int aflag
)
5128 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
5133 addr
= (uint32_t)EAX
;
5135 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5136 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5137 env
->segs
[R_FS
].base
);
5139 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
5141 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
5143 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5145 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5148 #ifdef TARGET_X86_64
5149 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5150 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5151 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5152 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5154 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5155 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5156 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5157 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5160 void helper_vmsave(int aflag
)
5163 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5168 addr
= (uint32_t)EAX
;
5170 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5171 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5172 env
->segs
[R_FS
].base
);
5174 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5176 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5178 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5180 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5183 #ifdef TARGET_X86_64
5184 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5185 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5186 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5187 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5189 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5190 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5191 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5192 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5195 void helper_stgi(void)
5197 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5198 env
->hflags2
|= HF2_GIF_MASK
;
5201 void helper_clgi(void)
5203 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5204 env
->hflags2
&= ~HF2_GIF_MASK
;
5207 void helper_skinit(void)
5209 helper_svm_check_intercept_param(SVM_EXIT_SKINIT
, 0);
5210 /* XXX: not implemented */
5211 raise_exception(EXCP06_ILLOP
);
5214 void helper_invlpga(int aflag
)
5217 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA
, 0);
5222 addr
= (uint32_t)EAX
;
5224 /* XXX: could use the ASID to see if it is needed to do the
5226 tlb_flush_page(env
, addr
);
5229 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5231 if (likely(!(env
->hflags
& HF_SVMI_MASK
)))
5234 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5235 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
5236 helper_vmexit(type
, param
);
5239 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5240 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
5241 helper_vmexit(type
, param
);
5244 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
5245 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
5246 helper_vmexit(type
, param
);
5249 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
5250 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
5251 helper_vmexit(type
, param
);
5254 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
5255 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
5256 helper_vmexit(type
, param
);
5260 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
5261 /* FIXME: this should be read in at vmrun (faster this way?) */
5262 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5264 switch((uint32_t)ECX
) {
5269 case 0xc0000000 ... 0xc0001fff:
5270 t0
= (8192 + ECX
- 0xc0000000) * 2;
5274 case 0xc0010000 ... 0xc0011fff:
5275 t0
= (16384 + ECX
- 0xc0010000) * 2;
5280 helper_vmexit(type
, param
);
5285 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5286 helper_vmexit(type
, param
);
5290 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
5291 helper_vmexit(type
, param
);
5297 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5298 uint32_t next_eip_addend
)
5300 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
5301 /* FIXME: this should be read in at vmrun (faster this way?) */
5302 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5303 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5304 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5306 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5307 env
->eip
+ next_eip_addend
);
5308 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
5313 /* Note: currently only 32 bits of exit_code are used */
5314 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5318 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmexit(%08x, %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
5319 exit_code
, exit_info_1
,
5320 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
5323 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
5324 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
5325 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
5327 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
5330 /* Save the VM state in the vmcb */
5331 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5333 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5335 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5337 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5340 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5341 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5343 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5344 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5346 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5347 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5348 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5349 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5350 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5352 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
5353 int_ctl
&= ~(V_TPR_MASK
| V_IRQ_MASK
);
5354 int_ctl
|= env
->v_tpr
& V_TPR_MASK
;
5355 if (env
->interrupt_request
& CPU_INTERRUPT_VIRQ
)
5356 int_ctl
|= V_IRQ_MASK
;
5357 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
5359 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5360 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
5361 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5362 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5363 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5364 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5365 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
5367 /* Reload the host state from vm_hsave */
5368 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
5369 env
->hflags
&= ~HF_SVMI_MASK
;
5371 env
->intercept_exceptions
= 0;
5372 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
5373 env
->tsc_offset
= 0;
5375 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
5376 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
5378 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
5379 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
5381 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
5382 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
5383 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
5384 /* we need to set the efer after the crs so the hidden flags get
5387 ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
)));
5389 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
5390 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5391 CC_OP
= CC_OP_EFLAGS
;
5393 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5395 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5397 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5399 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5402 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
5403 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
5404 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
5406 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
5407 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
5410 cpu_x86_set_cpl(env
, 0);
5411 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
5412 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
5414 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info
),
5415 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
)));
5416 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info_err
),
5417 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
)));
5418 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), 0);
5420 env
->hflags2
&= ~HF2_GIF_MASK
;
5421 /* FIXME: Resets the current ASID register to zero (host ASID). */
5423 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5425 /* Clears the TSC_OFFSET inside the processor. */
5427 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5428 from the page table indicated the host's CR3. If the PDPEs contain
5429 illegal state, the processor causes a shutdown. */
5431 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5432 env
->cr
[0] |= CR0_PE_MASK
;
5433 env
->eflags
&= ~VM_MASK
;
5435 /* Disables all breakpoints in the host DR7 register. */
5437 /* Checks the reloaded host state for consistency. */
5439 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5440 host's code segment or non-canonical (in the case of long mode), a
5441 #GP fault is delivered inside the host.) */
5443 /* remove any pending exception */
5444 env
->exception_index
= -1;
5445 env
->error_code
= 0;
5446 env
->old_exception
= -1;
5454 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5455 void helper_enter_mmx(void)
5458 *(uint32_t *)(env
->fptags
) = 0;
5459 *(uint32_t *)(env
->fptags
+ 4) = 0;
5462 void helper_emms(void)
5464 /* set to empty state */
5465 *(uint32_t *)(env
->fptags
) = 0x01010101;
5466 *(uint32_t *)(env
->fptags
+ 4) = 0x01010101;
/* Copy a 64-bit quantity from *s to *d (MOVQ).  Both pointers must
   reference suitably aligned 64-bit storage. */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
5476 #include "ops_sse.h"
5479 #include "ops_sse.h"
5482 #include "helper_template.h"
5486 #include "helper_template.h"
5490 #include "helper_template.h"
5493 #ifdef TARGET_X86_64
5496 #include "helper_template.h"
5501 /* bit operations */
5502 target_ulong
helper_bsf(target_ulong t0
)
5509 while ((res
& 1) == 0) {
5516 target_ulong
helper_lzcnt(target_ulong t0
, int wordsize
)
5519 target_ulong res
, mask
;
5521 if (wordsize
> 0 && t0
== 0) {
5525 count
= TARGET_LONG_BITS
- 1;
5526 mask
= (target_ulong
)1 << (TARGET_LONG_BITS
- 1);
5527 while ((res
& mask
) == 0) {
5532 return wordsize
- 1 - count
;
5537 target_ulong
helper_bsr(target_ulong t0
)
5539 return helper_lzcnt(t0
, 0);
5542 static int compute_all_eflags(void)
5547 static int compute_c_eflags(void)
5549 return CC_SRC
& CC_C
;
5552 uint32_t helper_cc_compute_all(int op
)
5555 default: /* should never happen */ return 0;
5557 case CC_OP_EFLAGS
: return compute_all_eflags();
5559 case CC_OP_MULB
: return compute_all_mulb();
5560 case CC_OP_MULW
: return compute_all_mulw();
5561 case CC_OP_MULL
: return compute_all_mull();
5563 case CC_OP_ADDB
: return compute_all_addb();
5564 case CC_OP_ADDW
: return compute_all_addw();
5565 case CC_OP_ADDL
: return compute_all_addl();
5567 case CC_OP_ADCB
: return compute_all_adcb();
5568 case CC_OP_ADCW
: return compute_all_adcw();
5569 case CC_OP_ADCL
: return compute_all_adcl();
5571 case CC_OP_SUBB
: return compute_all_subb();
5572 case CC_OP_SUBW
: return compute_all_subw();
5573 case CC_OP_SUBL
: return compute_all_subl();
5575 case CC_OP_SBBB
: return compute_all_sbbb();
5576 case CC_OP_SBBW
: return compute_all_sbbw();
5577 case CC_OP_SBBL
: return compute_all_sbbl();
5579 case CC_OP_LOGICB
: return compute_all_logicb();
5580 case CC_OP_LOGICW
: return compute_all_logicw();
5581 case CC_OP_LOGICL
: return compute_all_logicl();
5583 case CC_OP_INCB
: return compute_all_incb();
5584 case CC_OP_INCW
: return compute_all_incw();
5585 case CC_OP_INCL
: return compute_all_incl();
5587 case CC_OP_DECB
: return compute_all_decb();
5588 case CC_OP_DECW
: return compute_all_decw();
5589 case CC_OP_DECL
: return compute_all_decl();
5591 case CC_OP_SHLB
: return compute_all_shlb();
5592 case CC_OP_SHLW
: return compute_all_shlw();
5593 case CC_OP_SHLL
: return compute_all_shll();
5595 case CC_OP_SARB
: return compute_all_sarb();
5596 case CC_OP_SARW
: return compute_all_sarw();
5597 case CC_OP_SARL
: return compute_all_sarl();
5599 #ifdef TARGET_X86_64
5600 case CC_OP_MULQ
: return compute_all_mulq();
5602 case CC_OP_ADDQ
: return compute_all_addq();
5604 case CC_OP_ADCQ
: return compute_all_adcq();
5606 case CC_OP_SUBQ
: return compute_all_subq();
5608 case CC_OP_SBBQ
: return compute_all_sbbq();
5610 case CC_OP_LOGICQ
: return compute_all_logicq();
5612 case CC_OP_INCQ
: return compute_all_incq();
5614 case CC_OP_DECQ
: return compute_all_decq();
5616 case CC_OP_SHLQ
: return compute_all_shlq();
5618 case CC_OP_SARQ
: return compute_all_sarq();
/* Compute only the carry flag (CF) from the lazily stored condition-code
 * state, dispatching on the CC_OP_* value that records which operation
 * last set the flags.
 * NOTE(review): this listing is garbled by extraction — the opening
 * "switch (op) {" (original line ~5625) and the function's closing
 * braces after the CC_OP_SARQ case are not visible here. */
5623 uint32_t helper_cc_compute_c(int op
)
5626 default: /* should never happen */ return 0;
5628 case CC_OP_EFLAGS
: return compute_c_eflags();
/* All MUL widths share one carry helper (visible below: B/W/L/Q all
 * call compute_c_mull). */
5630 case CC_OP_MULB
: return compute_c_mull();
5631 case CC_OP_MULW
: return compute_c_mull();
5632 case CC_OP_MULL
: return compute_c_mull();
5634 case CC_OP_ADDB
: return compute_c_addb();
5635 case CC_OP_ADDW
: return compute_c_addw();
5636 case CC_OP_ADDL
: return compute_c_addl();
5638 case CC_OP_ADCB
: return compute_c_adcb();
5639 case CC_OP_ADCW
: return compute_c_adcw();
5640 case CC_OP_ADCL
: return compute_c_adcl();
5642 case CC_OP_SUBB
: return compute_c_subb();
5643 case CC_OP_SUBW
: return compute_c_subw();
5644 case CC_OP_SUBL
: return compute_c_subl();
5646 case CC_OP_SBBB
: return compute_c_sbbb();
5647 case CC_OP_SBBW
: return compute_c_sbbw();
5648 case CC_OP_SBBL
: return compute_c_sbbl();
5650 case CC_OP_LOGICB
: return compute_c_logicb();
5651 case CC_OP_LOGICW
: return compute_c_logicw();
5652 case CC_OP_LOGICL
: return compute_c_logicl();
/* Every INC and DEC width maps to the same helper (x86 INC/DEC do not
 * modify CF); presumably compute_c_incl just returns the CF saved at
 * translation time — confirm in its definition. */
5654 case CC_OP_INCB
: return compute_c_incl();
5655 case CC_OP_INCW
: return compute_c_incl();
5656 case CC_OP_INCL
: return compute_c_incl();
5658 case CC_OP_DECB
: return compute_c_incl();
5659 case CC_OP_DECW
: return compute_c_incl();
5660 case CC_OP_DECL
: return compute_c_incl();
5662 case CC_OP_SHLB
: return compute_c_shlb();
5663 case CC_OP_SHLW
: return compute_c_shlw();
5664 case CC_OP_SHLL
: return compute_c_shll();
/* All SAR widths share compute_c_sarl (visible below for B/W/L/Q). */
5666 case CC_OP_SARB
: return compute_c_sarl();
5667 case CC_OP_SARW
: return compute_c_sarl();
5668 case CC_OP_SARL
: return compute_c_sarl();
/* 64-bit (Q) operand variants, only present on x86-64 targets. */
5670 #ifdef TARGET_X86_64
5671 case CC_OP_MULQ
: return compute_c_mull();
5673 case CC_OP_ADDQ
: return compute_c_addq();
5675 case CC_OP_ADCQ
: return compute_c_adcq();
5677 case CC_OP_SUBQ
: return compute_c_subq();
5679 case CC_OP_SBBQ
: return compute_c_sbbq();
5681 case CC_OP_LOGICQ
: return compute_c_logicq();
5683 case CC_OP_INCQ
: return compute_c_incl();
5685 case CC_OP_DECQ
: return compute_c_incl();
5687 case CC_OP_SHLQ
: return compute_c_shlq();
5689 case CC_OP_SARQ
: return compute_c_sarl();