/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* Debug wrapper around raise_exception_err(); normally disabled. */
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /* pi */
    0.30102999566398119523L,  /* lg2 */
    0.69314718055994530943L,  /* ln2 */
    1.44269504088896340739L,  /* l2e */
    3.32192809488736234781L,  /* l2t */
};
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
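/* switch_tss() below implements the hardware task-switch sequence: the
   outgoing context is written back into the current TSS, the incoming
   TSS is read (general registers, segment selectors, LDT, CR3, EFLAGS),
   the busy bits are updated and TR is reloaded.  The 32-bit TSS format
   (limit at least 103 bytes) and the 16-bit format (limit at least 43
   bytes) use different field offsets, hence the two code paths keyed on
   bit 3 of the descriptor type. */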
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
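/* These push/pop macros operate on a local copy of the stack pointer:
   they adjust it first, mask it with sp_mask (0xffff for a 16-bit stack
   segment) and only touch memory through ss.base + offset.  ESP itself
   is committed later with SET_ESP, so a fault raised while building a
   frame leaves the architectural ESP unchanged. */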
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
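/* SYSCALL derives flat CS/SS selectors from MSR_STAR bits 47:32, saves
   the return RIP in RCX (and RFLAGS in R11 in long mode) and enters
   CPL 0; the instruction itself performs no stack switch, which is why
   only the segment caches and EIP are updated here. */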
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
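/* do_smm_enter() and helper_rsm() above save and restore the CPU state
   through the SMRAM save area at smbase + 0x8000; the 64-bit (AMD64) and
   legacy 32-bit save-state layouts use different offsets, which is why
   both functions carry separate TARGET_X86_64 branches. */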
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
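/* For all of the division helpers, a zero divisor and a quotient that
   does not fit in the destination register are both reported through
   EXCP00_DIVZ (vector 0), matching the #DE behaviour of the hardware
   DIV/IDIV instructions. */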
/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
1985 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
1987 target_ulong esp
, ebp
;
2007 stw(esp
, lduw(ebp
));
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
)
2066 int index
, type
, entry_limit
;
2070 if ((selector
& 0xfffc) == 0) {
2071 /* NULL selector case: invalid TR */
2077 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2079 index
= selector
& ~7;
2080 #ifdef TARGET_X86_64
2081 if (env
->hflags
& HF_LMA_MASK
)
2086 if ((index
+ entry_limit
) > dt
->limit
)
2087 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2088 ptr
= dt
->base
+ index
;
2089 e1
= ldl_kernel(ptr
);
2090 e2
= ldl_kernel(ptr
+ 4);
2091 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2092 if ((e2
& DESC_S_MASK
) ||
2093 (type
!= 1 && type
!= 9))
2094 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2095 if (!(e2
& DESC_P_MASK
))
2096 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2097 #ifdef TARGET_X86_64
2098 if (env
->hflags
& HF_LMA_MASK
) {
2100 e3
= ldl_kernel(ptr
+ 8);
2101 e4
= ldl_kernel(ptr
+ 12);
2102 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2103 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2104 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2105 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2109 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2111 e2
|= DESC_TSS_BUSY_MASK
;
2112 stl_kernel(ptr
+ 4, e2
);
2114 env
->tr
.selector
= selector
;
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
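/* Illustrative sketch (simplified model, not original code): in real mode a
   far call just pushes the caller's CS and return IP and then loads CS:IP,
   with the new code-segment base derived as selector << 4.  A stripped-down
   model of the 16-bit ("shift == 0") case handled above, using a word-indexed
   stack array purely for illustration: */
static inline void example_real_mode_far_call(uint16_t *cs, uint16_t *ip,
                                              unsigned *sp_words, uint16_t stack[],
                                              uint16_t new_cs, uint16_t new_ip)
{
    stack[--*sp_words] = *cs;   /* push return CS */
    stack[--*sp_words] = *ip;   /* push return IP */
    *cs = new_cs;               /* code segment base becomes new_cs << 4 */
    *ip = new_ip;
}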
2312 /* protected mode call */
2313 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2314 int shift
, int next_eip_addend
)
2317 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2318 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2319 uint32_t val
, limit
, old_sp_mask
;
2320 target_ulong ssp
, old_ssp
, next_eip
;
2322 next_eip
= env
->eip
+ next_eip_addend
;
2323 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2324 LOG_PCALL_STATE(env
);
2325 if ((new_cs
& 0xfffc) == 0)
2326 raise_exception_err(EXCP0D_GPF
, 0);
2327 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2328 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2329 cpl
= env
->hflags
& HF_CPL_MASK
;
2330 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2331 if (e2
& DESC_S_MASK
) {
2332 if (!(e2
& DESC_CS_MASK
))
2333 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2334 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2335 if (e2
& DESC_C_MASK
) {
2336 /* conforming code segment */
2338 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2340 /* non conforming code segment */
2343 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2345 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2347 if (!(e2
& DESC_P_MASK
))
2348 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2350 #ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2356 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2357 PUSHQ(rsp
, next_eip
);
2358 /* from this point, not restartable */
2360 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2361 get_seg_base(e1
, e2
),
2362 get_seg_limit(e1
, e2
), e2
);
2368 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2369 ssp
= env
->segs
[R_SS
].base
;
2371 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2372 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2374 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2375 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2378 limit
= get_seg_limit(e1
, e2
);
2379 if (new_eip
> limit
)
2380 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp
, sp_mask
);
2383 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2384 get_seg_base(e1
, e2
), limit
, e2
);
2388 /* check gate type */
2389 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2390 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl
< cpl
|| dpl
< rpl
)
2397 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2398 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2399 CC_OP
= CC_OP_EFLAGS
;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2405 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2410 if (dpl
< cpl
|| dpl
< rpl
)
2411 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2412 /* check valid bit */
2413 if (!(e2
& DESC_P_MASK
))
2414 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2415 selector
= e1
>> 16;
2416 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2417 param_count
= e2
& 0x1f;
2418 if ((selector
& 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF
, 0);
2421 if (load_segment(&e1
, &e2
, selector
) != 0)
2422 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2423 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2424 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2425 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2427 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2428 if (!(e2
& DESC_P_MASK
))
2429 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2431 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2432 /* to inner privilege */
2433 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2434 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2435 ss
, sp
, param_count
, ESP
);
2436 if ((ss
& 0xfffc) == 0)
2437 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2438 if ((ss
& 3) != dpl
)
2439 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2440 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2441 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2442 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2444 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2445 if (!(ss_e2
& DESC_S_MASK
) ||
2446 (ss_e2
& DESC_CS_MASK
) ||
2447 !(ss_e2
& DESC_W_MASK
))
2448 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2449 if (!(ss_e2
& DESC_P_MASK
))
2450 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2452 // push_size = ((param_count * 2) + 8) << shift;
2454 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2455 old_ssp
= env
->segs
[R_SS
].base
;
2457 sp_mask
= get_sp_mask(ss_e2
);
2458 ssp
= get_seg_base(ss_e1
, ss_e2
);
2460 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2461 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2462 for(i
= param_count
- 1; i
>= 0; i
--) {
2463 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2464 PUSHL(ssp
, sp
, sp_mask
, val
);
2467 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2468 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2469 for(i
= param_count
- 1; i
>= 0; i
--) {
2470 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2471 PUSHW(ssp
, sp
, sp_mask
, val
);
2476 /* to same privilege */
2478 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2479 ssp
= env
->segs
[R_SS
].base
;
2480 // push_size = (4 << shift);
2485 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2486 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2488 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2489 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2492 /* from this point, not restartable */
2495 ss
= (ss
& ~3) | dpl
;
2496 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2498 get_seg_limit(ss_e1
, ss_e2
),
2502 selector
= (selector
& ~3) | dpl
;
2503 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2504 get_seg_base(e1
, e2
),
2505 get_seg_limit(e1
, e2
),
2507 cpu_x86_set_cpl(env
, dpl
);
2508 SET_ESP(sp
, sp_mask
);
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
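/* Illustrative sketch (assumption about the POPW/POPL style macros defined
   earlier in this file, kept here only as a reading aid): stack accesses go
   through a local sp that is masked with sp_mask, so 16-bit stacks wrap
   inside 64K while 32-bit stacks use all of ESP; roughly: */
static inline uint16_t example_popw(target_ulong ssp, uint32_t *sp,
                                    uint32_t sp_mask)
{
    uint16_t val = lduw_kernel(ssp + (*sp & sp_mask)); /* read at SS base + sp */
    *sp += 2;                                          /* advance local sp only */
    return val;
}
/* helper_iret_real() above then merges the local sp back with
   ESP = (ESP & ~sp_mask) | (sp & sp_mask). */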
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
2571 /* protected mode iret */
2572 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2574 uint32_t new_cs
, new_eflags
, new_ss
;
2575 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2576 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2577 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2578 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2580 #ifdef TARGET_X86_64
2585 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2587 ssp
= env
->segs
[R_SS
].base
;
2588 new_eflags
= 0; /* avoid warning */
2589 #ifdef TARGET_X86_64
2595 POPQ(sp
, new_eflags
);
2601 POPL(ssp
, sp
, sp_mask
, new_eip
);
2602 POPL(ssp
, sp
, sp_mask
, new_cs
);
2605 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2606 if (new_eflags
& VM_MASK
)
2607 goto return_to_vm86
;
2611 POPW(ssp
, sp
, sp_mask
, new_eip
);
2612 POPW(ssp
, sp
, sp_mask
, new_cs
);
2614 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2616 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2617 new_cs
, new_eip
, shift
, addend
);
2618 LOG_PCALL_STATE(env
);
2619 if ((new_cs
& 0xfffc) == 0)
2620 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2621 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2622 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2623 if (!(e2
& DESC_S_MASK
) ||
2624 !(e2
& DESC_CS_MASK
))
2625 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2626 cpl
= env
->hflags
& HF_CPL_MASK
;
2629 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2630 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2631 if (e2
& DESC_C_MASK
) {
2633 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2636 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2638 if (!(e2
& DESC_P_MASK
))
2639 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2642 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2643 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2644 /* return to same privilege level */
2645 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2646 get_seg_base(e1
, e2
),
2647 get_seg_limit(e1
, e2
),
2650 /* return to different privilege level */
2651 #ifdef TARGET_X86_64
2660 POPL(ssp
, sp
, sp_mask
, new_esp
);
2661 POPL(ssp
, sp
, sp_mask
, new_ss
);
2665 POPW(ssp
, sp
, sp_mask
, new_esp
);
2666 POPW(ssp
, sp
, sp_mask
, new_ss
);
2668 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2670 if ((new_ss
& 0xfffc) == 0) {
2671 #ifdef TARGET_X86_64
2672 /* NULL ss is allowed in long mode if cpl != 3*/
2673 /* XXX: test CS64 ? */
2674 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2675 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2677 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2678 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2679 DESC_W_MASK
| DESC_A_MASK
);
2680 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2684 raise_exception_err(EXCP0D_GPF
, 0);
2687 if ((new_ss
& 3) != rpl
)
2688 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2689 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2690 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2691 if (!(ss_e2
& DESC_S_MASK
) ||
2692 (ss_e2
& DESC_CS_MASK
) ||
2693 !(ss_e2
& DESC_W_MASK
))
2694 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2695 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2697 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2698 if (!(ss_e2
& DESC_P_MASK
))
2699 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2700 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2701 get_seg_base(ss_e1
, ss_e2
),
2702 get_seg_limit(ss_e1
, ss_e2
),
2706 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2707 get_seg_base(e1
, e2
),
2708 get_seg_limit(e1
, e2
),
2710 cpu_x86_set_cpl(env
, rpl
);
2712 #ifdef TARGET_X86_64
2713 if (env
->hflags
& HF_CS64_MASK
)
2717 sp_mask
= get_sp_mask(ss_e2
);
2719 /* validate data segments */
2720 validate_seg(R_ES
, rpl
);
2721 validate_seg(R_DS
, rpl
);
2722 validate_seg(R_FS
, rpl
);
2723 validate_seg(R_GS
, rpl
);
2727 SET_ESP(sp
, sp_mask
);
2730 /* NOTE: 'cpl' is the _old_ CPL */
2731 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2733 eflags_mask
|= IOPL_MASK
;
2734 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2736 eflags_mask
|= IF_MASK
;
2738 eflags_mask
&= 0xffff;
2739 load_eflags(new_eflags
, eflags_mask
);
2744 POPL(ssp
, sp
, sp_mask
, new_esp
);
2745 POPL(ssp
, sp
, sp_mask
, new_ss
);
2746 POPL(ssp
, sp
, sp_mask
, new_es
);
2747 POPL(ssp
, sp
, sp_mask
, new_ds
);
2748 POPL(ssp
, sp
, sp_mask
, new_fs
);
2749 POPL(ssp
, sp
, sp_mask
, new_gs
);
2751 /* modify processor state */
2752 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2753 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2754 load_seg_vm(R_CS
, new_cs
& 0xffff);
2755 cpu_x86_set_cpl(env
, 3);
2756 load_seg_vm(R_SS
, new_ss
& 0xffff);
2757 load_seg_vm(R_ES
, new_es
& 0xffff);
2758 load_seg_vm(R_DS
, new_ds
& 0xffff);
2759 load_seg_vm(R_FS
, new_fs
& 0xffff);
2760 load_seg_vm(R_GS
, new_gs
& 0xffff);
2762 env
->eip
= new_eip
& 0xffff;
2766 void helper_iret_protected(int shift
, int next_eip
)
2768 int tss_selector
, type
;
2771 /* specific case for TSS */
2772 if (env
->eflags
& NT_MASK
) {
2773 #ifdef TARGET_X86_64
2774 if (env
->hflags
& HF_LMA_MASK
)
2775 raise_exception_err(EXCP0D_GPF
, 0);
2777 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2778 if (tss_selector
& 4)
2779 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2780 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2781 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2782 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2783 /* NOTE: we check both segment and busy TSS */
2785 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2786 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2788 helper_ret_protected(shift
, 1, 0);
2790 env
->hflags2
&= ~HF2_NMI_MASK
;
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2798 void helper_sysenter(void)
2800 if (env
->sysenter_cs
== 0) {
2801 raise_exception_err(EXCP0D_GPF
, 0);
2803 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2804 cpu_x86_set_cpl(env
, 0);
2806 #ifdef TARGET_X86_64
2807 if (env
->hflags
& HF_LMA_MASK
) {
2808 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2810 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2812 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2816 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2818 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2820 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2822 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2824 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2826 DESC_W_MASK
| DESC_A_MASK
);
2827 ESP
= env
->sysenter_esp
;
2828 EIP
= env
->sysenter_eip
;
2831 void helper_sysexit(int dflag
)
2835 cpl
= env
->hflags
& HF_CPL_MASK
;
2836 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2837 raise_exception_err(EXCP0D_GPF
, 0);
2839 cpu_x86_set_cpl(env
, 3);
2840 #ifdef TARGET_X86_64
2842 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) | 3,
2844 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2845 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2846 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2847 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) | 3,
2849 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2850 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2851 DESC_W_MASK
| DESC_A_MASK
);
2855 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2857 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2858 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2859 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2860 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2862 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2863 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2864 DESC_W_MASK
| DESC_A_MASK
);
2870 #if defined(CONFIG_USER_ONLY)
2871 target_ulong
helper_read_crN(int reg
)
2876 void helper_write_crN(int reg
, target_ulong t0
)
2880 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2884 target_ulong
helper_read_crN(int reg
)
2888 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2894 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2895 val
= cpu_get_apic_tpr(env
->apic_state
);
2904 void helper_write_crN(int reg
, target_ulong t0
)
2906 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2909 cpu_x86_update_cr0(env
, t0
);
2912 cpu_x86_update_cr3(env
, t0
);
2915 cpu_x86_update_cr4(env
, t0
);
2918 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2919 cpu_set_apic_tpr(env
->apic_state
, t0
);
2921 env
->v_tpr
= t0
& 0x0f;
2929 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2934 hw_breakpoint_remove(env
, reg
);
2936 hw_breakpoint_insert(env
, reg
);
2937 } else if (reg
== 7) {
2938 for (i
= 0; i
< 4; i
++)
2939 hw_breakpoint_remove(env
, i
);
2941 for (i
= 0; i
< 4; i
++)
2942 hw_breakpoint_insert(env
, i
);
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
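/* Illustrative sketch (added commentary, not original code): RDTSC, RDMSR
   and friends return 64-bit values through the EDX:EAX register pair, so
   the helpers above and the MSR helpers below always split the result the
   same way: */
static inline void example_split_edx_eax(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;         /* low 32 bits */
    *edx = (uint32_t)(val >> 32); /* high 32 bits */
}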
2999 #if defined(CONFIG_USER_ONLY)
3000 void helper_wrmsr(void)
3004 void helper_rdmsr(void)
3008 void helper_wrmsr(void)
3012 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3014 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3016 switch((uint32_t)ECX
) {
3017 case MSR_IA32_SYSENTER_CS
:
3018 env
->sysenter_cs
= val
& 0xffff;
3020 case MSR_IA32_SYSENTER_ESP
:
3021 env
->sysenter_esp
= val
;
3023 case MSR_IA32_SYSENTER_EIP
:
3024 env
->sysenter_eip
= val
;
3026 case MSR_IA32_APICBASE
:
3027 cpu_set_apic_base(env
->apic_state
, val
);
3031 uint64_t update_mask
;
3033 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3034 update_mask
|= MSR_EFER_SCE
;
3035 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3036 update_mask
|= MSR_EFER_LME
;
3037 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3038 update_mask
|= MSR_EFER_FFXSR
;
3039 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3040 update_mask
|= MSR_EFER_NXE
;
3041 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3042 update_mask
|= MSR_EFER_SVME
;
3043 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3044 update_mask
|= MSR_EFER_FFXSR
;
3045 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3046 (val
& update_mask
));
3055 case MSR_VM_HSAVE_PA
:
3056 env
->vm_hsave
= val
;
3058 #ifdef TARGET_X86_64
3069 env
->segs
[R_FS
].base
= val
;
3072 env
->segs
[R_GS
].base
= val
;
3074 case MSR_KERNELGSBASE
:
3075 env
->kernelgsbase
= val
;
3078 case MSR_MTRRphysBase(0):
3079 case MSR_MTRRphysBase(1):
3080 case MSR_MTRRphysBase(2):
3081 case MSR_MTRRphysBase(3):
3082 case MSR_MTRRphysBase(4):
3083 case MSR_MTRRphysBase(5):
3084 case MSR_MTRRphysBase(6):
3085 case MSR_MTRRphysBase(7):
3086 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3088 case MSR_MTRRphysMask(0):
3089 case MSR_MTRRphysMask(1):
3090 case MSR_MTRRphysMask(2):
3091 case MSR_MTRRphysMask(3):
3092 case MSR_MTRRphysMask(4):
3093 case MSR_MTRRphysMask(5):
3094 case MSR_MTRRphysMask(6):
3095 case MSR_MTRRphysMask(7):
3096 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3098 case MSR_MTRRfix64K_00000
:
3099 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3101 case MSR_MTRRfix16K_80000
:
3102 case MSR_MTRRfix16K_A0000
:
3103 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3105 case MSR_MTRRfix4K_C0000
:
3106 case MSR_MTRRfix4K_C8000
:
3107 case MSR_MTRRfix4K_D0000
:
3108 case MSR_MTRRfix4K_D8000
:
3109 case MSR_MTRRfix4K_E0000
:
3110 case MSR_MTRRfix4K_E8000
:
3111 case MSR_MTRRfix4K_F0000
:
3112 case MSR_MTRRfix4K_F8000
:
3113 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3115 case MSR_MTRRdefType
:
3116 env
->mtrr_deftype
= val
;
3118 case MSR_MCG_STATUS
:
3119 env
->mcg_status
= val
;
3122 if ((env
->mcg_cap
& MCG_CTL_P
)
3123 && (val
== 0 || val
== ~(uint64_t)0))
3130 if ((uint32_t)ECX
>= MSR_MC0_CTL
3131 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3132 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3133 if ((offset
& 0x3) != 0
3134 || (val
== 0 || val
== ~(uint64_t)0))
3135 env
->mce_banks
[offset
] = val
;
3138 /* XXX: exception ? */
3143 void helper_rdmsr(void)
3147 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3149 switch((uint32_t)ECX
) {
3150 case MSR_IA32_SYSENTER_CS
:
3151 val
= env
->sysenter_cs
;
3153 case MSR_IA32_SYSENTER_ESP
:
3154 val
= env
->sysenter_esp
;
3156 case MSR_IA32_SYSENTER_EIP
:
3157 val
= env
->sysenter_eip
;
3159 case MSR_IA32_APICBASE
:
3160 val
= cpu_get_apic_base(env
->apic_state
);
3171 case MSR_VM_HSAVE_PA
:
3172 val
= env
->vm_hsave
;
3174 case MSR_IA32_PERF_STATUS
:
3175 /* tsc_increment_by_tick */
3177 /* CPU multiplier */
3178 val
|= (((uint64_t)4ULL) << 40);
3180 #ifdef TARGET_X86_64
3191 val
= env
->segs
[R_FS
].base
;
3194 val
= env
->segs
[R_GS
].base
;
3196 case MSR_KERNELGSBASE
:
3197 val
= env
->kernelgsbase
;
3203 case MSR_MTRRphysBase(0):
3204 case MSR_MTRRphysBase(1):
3205 case MSR_MTRRphysBase(2):
3206 case MSR_MTRRphysBase(3):
3207 case MSR_MTRRphysBase(4):
3208 case MSR_MTRRphysBase(5):
3209 case MSR_MTRRphysBase(6):
3210 case MSR_MTRRphysBase(7):
3211 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3213 case MSR_MTRRphysMask(0):
3214 case MSR_MTRRphysMask(1):
3215 case MSR_MTRRphysMask(2):
3216 case MSR_MTRRphysMask(3):
3217 case MSR_MTRRphysMask(4):
3218 case MSR_MTRRphysMask(5):
3219 case MSR_MTRRphysMask(6):
3220 case MSR_MTRRphysMask(7):
3221 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3223 case MSR_MTRRfix64K_00000
:
3224 val
= env
->mtrr_fixed
[0];
3226 case MSR_MTRRfix16K_80000
:
3227 case MSR_MTRRfix16K_A0000
:
3228 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3230 case MSR_MTRRfix4K_C0000
:
3231 case MSR_MTRRfix4K_C8000
:
3232 case MSR_MTRRfix4K_D0000
:
3233 case MSR_MTRRfix4K_D8000
:
3234 case MSR_MTRRfix4K_E0000
:
3235 case MSR_MTRRfix4K_E8000
:
3236 case MSR_MTRRfix4K_F0000
:
3237 case MSR_MTRRfix4K_F8000
:
3238 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3240 case MSR_MTRRdefType
:
3241 val
= env
->mtrr_deftype
;
3244 if (env
->cpuid_features
& CPUID_MTRR
)
3245 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3247 /* XXX: exception ? */
3254 if (env
->mcg_cap
& MCG_CTL_P
)
3259 case MSR_MCG_STATUS
:
3260 val
= env
->mcg_status
;
3263 if ((uint32_t)ECX
>= MSR_MC0_CTL
3264 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3265 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3266 val
= env
->mce_banks
[offset
];
3269 /* XXX: exception ? */
3273 EAX
= (uint32_t)(val
);
3274 EDX
= (uint32_t)(val
>> 32);
3278 target_ulong
helper_lsl(target_ulong selector1
)
3281 uint32_t e1
, e2
, eflags
, selector
;
3282 int rpl
, dpl
, cpl
, type
;
3284 selector
= selector1
& 0xffff;
3285 eflags
= helper_cc_compute_all(CC_OP
);
3286 if ((selector
& 0xfffc) == 0)
3288 if (load_segment(&e1
, &e2
, selector
) != 0)
3291 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3292 cpl
= env
->hflags
& HF_CPL_MASK
;
3293 if (e2
& DESC_S_MASK
) {
3294 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3297 if (dpl
< cpl
|| dpl
< rpl
)
3301 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3312 if (dpl
< cpl
|| dpl
< rpl
) {
3314 CC_SRC
= eflags
& ~CC_Z
;
3318 limit
= get_seg_limit(e1
, e2
);
3319 CC_SRC
= eflags
| CC_Z
;
3323 target_ulong
helper_lar(target_ulong selector1
)
3325 uint32_t e1
, e2
, eflags
, selector
;
3326 int rpl
, dpl
, cpl
, type
;
3328 selector
= selector1
& 0xffff;
3329 eflags
= helper_cc_compute_all(CC_OP
);
3330 if ((selector
& 0xfffc) == 0)
3332 if (load_segment(&e1
, &e2
, selector
) != 0)
3335 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3336 cpl
= env
->hflags
& HF_CPL_MASK
;
3337 if (e2
& DESC_S_MASK
) {
3338 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3341 if (dpl
< cpl
|| dpl
< rpl
)
3345 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3359 if (dpl
< cpl
|| dpl
< rpl
) {
3361 CC_SRC
= eflags
& ~CC_Z
;
3365 CC_SRC
= eflags
| CC_Z
;
3366 return e2
& 0x00f0ff00;
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3432 /* x87 FPU helpers */
3434 static void fpu_set_exception(int mask
)
3437 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3438 env
->fpus
|= FPUS_SE
| FPUS_B
;
3441 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3444 fpu_set_exception(FPUS_ZE
);
3448 static void fpu_raise_exception(void)
3450 if (env
->cr
[0] & CR0_NE_MASK
) {
3451 raise_exception(EXCP10_COPR
);
3453 #if !defined(CONFIG_USER_ONLY)
3460 void helper_flds_FT0(uint32_t val
)
3467 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3470 void helper_fldl_FT0(uint64_t val
)
3477 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3480 void helper_fildl_FT0(int32_t val
)
3482 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3485 void helper_flds_ST0(uint32_t val
)
3492 new_fpstt
= (env
->fpstt
- 1) & 7;
3494 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3495 env
->fpstt
= new_fpstt
;
3496 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3499 void helper_fldl_ST0(uint64_t val
)
3506 new_fpstt
= (env
->fpstt
- 1) & 7;
3508 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3509 env
->fpstt
= new_fpstt
;
3510 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3513 void helper_fildl_ST0(int32_t val
)
3516 new_fpstt
= (env
->fpstt
- 1) & 7;
3517 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3518 env
->fpstt
= new_fpstt
;
3519 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3522 void helper_fildll_ST0(int64_t val
)
3525 new_fpstt
= (env
->fpstt
- 1) & 7;
3526 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3527 env
->fpstt
= new_fpstt
;
3528 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3531 uint32_t helper_fsts_ST0(void)
3537 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3541 uint64_t helper_fstl_ST0(void)
3547 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3551 int32_t helper_fist_ST0(void)
3554 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3555 if (val
!= (int16_t)val
)
3560 int32_t helper_fistl_ST0(void)
3563 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3567 int64_t helper_fistll_ST0(void)
3570 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3574 int32_t helper_fistt_ST0(void)
3577 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3578 if (val
!= (int16_t)val
)
3583 int32_t helper_fisttl_ST0(void)
3586 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3590 int64_t helper_fisttll_ST0(void)
3593 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3597 void helper_fldt_ST0(target_ulong ptr
)
3600 new_fpstt
= (env
->fpstt
- 1) & 7;
3601 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3602 env
->fpstt
= new_fpstt
;
3603 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3606 void helper_fstt_ST0(target_ulong ptr
)
3608 helper_fstt(ST0
, ptr
);
3611 void helper_fpush(void)
3616 void helper_fpop(void)
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
3640 void helper_fmov_ST0_FT0(void)
3645 void helper_fmov_FT0_STN(int st_index
)
3650 void helper_fmov_ST0_STN(int st_index
)
3655 void helper_fmov_STN_ST0(int st_index
)
3660 void helper_fxchg_ST0_STN(int st_index
)
/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
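/* Illustrative sketch (added commentary, not original code): the compare
   helpers above fold the softfloat result (-1 less, 0 equal, 1 greater,
   2 unordered) into the x87 condition bits C0 (bit 8), C2 (bit 10) and
   C3 (bit 14); this is exactly what the fcom_ccval entries encode, e.g.
   0x0100 = C0, 0x4000 = C3, 0x4500 = C3|C2|C0 for unordered. */
static inline int example_fcom_status_bits(int ret)
{
    const int c0 = 1 << 8, c2 = 1 << 10, c3 = 1 << 14;
    switch (ret) {
    case -1: return c0;            /* ST0 < FT0 */
    case 0:  return c3;            /* ST0 == FT0 */
    case 1:  return 0;             /* ST0 > FT0 */
    default: return c3 | c2 | c0;  /* unordered */
    }
}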
3712 void helper_fadd_ST0_FT0(void)
3717 void helper_fmul_ST0_FT0(void)
3722 void helper_fsub_ST0_FT0(void)
3727 void helper_fsubr_ST0_FT0(void)
3732 void helper_fdiv_ST0_FT0(void)
3734 ST0
= helper_fdiv(ST0
, FT0
);
3737 void helper_fdivr_ST0_FT0(void)
3739 ST0
= helper_fdiv(FT0
, ST0
);
3742 /* fp operations between STN and ST0 */
3744 void helper_fadd_STN_ST0(int st_index
)
3746 ST(st_index
) += ST0
;
3749 void helper_fmul_STN_ST0(int st_index
)
3751 ST(st_index
) *= ST0
;
3754 void helper_fsub_STN_ST0(int st_index
)
3756 ST(st_index
) -= ST0
;
3759 void helper_fsubr_STN_ST0(int st_index
)
3766 void helper_fdiv_STN_ST0(int st_index
)
3770 *p
= helper_fdiv(*p
, ST0
);
3773 void helper_fdivr_STN_ST0(int st_index
)
3777 *p
= helper_fdiv(ST0
, *p
);
/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}
3791 void helper_fld1_ST0(void)
3796 void helper_fldl2t_ST0(void)
3801 void helper_fldl2e_ST0(void)
3806 void helper_fldpi_ST0(void)
3811 void helper_fldlg2_ST0(void)
3816 void helper_fldln2_ST0(void)
3821 void helper_fldz_ST0(void)
3826 void helper_fldz_FT0(void)
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
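/* Illustrative sketch (added commentary, not original code): the emulated
   status word keeps everything except TOP in env->fpus and the stack-top
   pointer in env->fpstt, so FNSTSW has to splice fpstt back into bits
   13..11; the inverse split appears in helper_fldenv()/helper_fxrstor()
   further down: */
static inline void example_split_fpus(uint32_t sw, uint32_t *fpus, uint32_t *fpstt)
{
    *fpstt = (sw >> 11) & 7;   /* TOP field, bits 13..11 */
    *fpus = sw & ~0x3800;      /* everything else */
}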
3841 static void update_fp_status(void)
3845 /* set rounding mode */
3846 switch(env
->fpuc
& RC_MASK
) {
3849 rnd_type
= float_round_nearest_even
;
3852 rnd_type
= float_round_down
;
3855 rnd_type
= float_round_up
;
3858 rnd_type
= float_round_to_zero
;
3861 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3863 switch((env
->fpuc
>> 8) & 3) {
3875 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3879 void helper_fldcw(uint32_t val
)
3885 void helper_fclex(void)
3887 env
->fpus
&= 0x7f00;
3890 void helper_fwait(void)
3892 if (env
->fpus
& FPUS_SE
)
3893 fpu_raise_exception();
3896 void helper_fninit(void)
3913 void helper_fbld_ST0(target_ulong ptr
)
3921 for(i
= 8; i
>= 0; i
--) {
3923 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3926 if (ldub(ptr
+ 9) & 0x80)
3932 void helper_fbst_ST0(target_ulong ptr
)
3935 target_ulong mem_ref
, mem_end
;
3938 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3940 mem_end
= mem_ref
+ 9;
3947 while (mem_ref
< mem_end
) {
3952 v
= ((v
/ 10) << 4) | (v
% 10);
3955 while (mem_ref
< mem_end
) {
3960 void helper_f2xm1(void)
3962 ST0
= pow(2.0,ST0
) - 1.0;
3965 void helper_fyl2x(void)
3967 CPU86_LDouble fptemp
;
3971 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3975 env
->fpus
&= (~0x4700);
3980 void helper_fptan(void)
3982 CPU86_LDouble fptemp
;
3985 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3991 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3992 /* the above code is for |arg| < 2**52 only */
3996 void helper_fpatan(void)
3998 CPU86_LDouble fptemp
, fpsrcop
;
4002 ST1
= atan2(fpsrcop
,fptemp
);
4006 void helper_fxtract(void)
4008 CPU86_LDoubleU temp
;
4009 unsigned int expdif
;
4012 expdif
= EXPD(temp
) - EXPBIAS
;
4013 /*DP exponent bias*/
4020 void helper_fprem1(void)
4022 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4023 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4025 signed long long int q
;
4027 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4028 ST0
= 0.0 / 0.0; /* NaN */
4029 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4035 fpsrcop1
.d
= fpsrcop
;
4037 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4040 /* optimisation? taken from the AMD docs */
4041 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4042 /* ST0 is unchanged */
4047 dblq
= fpsrcop
/ fptemp
;
4048 /* round dblq towards nearest integer */
4050 ST0
= fpsrcop
- fptemp
* dblq
;
4052 /* convert dblq to q by truncating towards zero */
4054 q
= (signed long long int)(-dblq
);
4056 q
= (signed long long int)dblq
;
4058 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4059 /* (C0,C3,C1) <-- (q2,q1,q0) */
4060 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4061 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4062 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4064 env
->fpus
|= 0x400; /* C2 <-- 1 */
4065 fptemp
= pow(2.0, expdif
- 50);
4066 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4067 /* fpsrcop = integer obtained by chopping */
4068 fpsrcop
= (fpsrcop
< 0.0) ?
4069 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4070 ST0
-= (ST1
* fpsrcop
* fptemp
);
4074 void helper_fprem(void)
4076 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4077 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4079 signed long long int q
;
4081 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4082 ST0
= 0.0 / 0.0; /* NaN */
4083 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4087 fpsrcop
= (CPU86_LDouble
)ST0
;
4088 fptemp
= (CPU86_LDouble
)ST1
;
4089 fpsrcop1
.d
= fpsrcop
;
4091 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4094 /* optimisation? taken from the AMD docs */
4095 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4096 /* ST0 is unchanged */
4100 if ( expdif
< 53 ) {
4101 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4102 /* round dblq towards zero */
4103 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4104 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4106 /* convert dblq to q by truncating towards zero */
4108 q
= (signed long long int)(-dblq
);
4110 q
= (signed long long int)dblq
;
4112 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4113 /* (C0,C3,C1) <-- (q2,q1,q0) */
4114 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4115 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4116 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4118 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4119 env
->fpus
|= 0x400; /* C2 <-- 1 */
4120 fptemp
= pow(2.0, (double)(expdif
- N
));
4121 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4122 /* fpsrcop = integer obtained by chopping */
4123 fpsrcop
= (fpsrcop
< 0.0) ?
4124 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4125 ST0
-= (ST1
* fpsrcop
* fptemp
);
4129 void helper_fyl2xp1(void)
4131 CPU86_LDouble fptemp
;
4134 if ((fptemp
+1.0)>0.0) {
4135 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4139 env
->fpus
&= (~0x4700);
4144 void helper_fsqrt(void)
4146 CPU86_LDouble fptemp
;
4150 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4156 void helper_fsincos(void)
4158 CPU86_LDouble fptemp
;
4161 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4167 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4168 /* the above code is for |arg| < 2**63 only */
4172 void helper_frndint(void)
4174 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4177 void helper_fscale(void)
4179 ST0
= ldexp (ST0
, (int)(ST1
));
4182 void helper_fsin(void)
4184 CPU86_LDouble fptemp
;
4187 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4191 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4192 /* the above code is for |arg| < 2**53 only */
4196 void helper_fcos(void)
4198 CPU86_LDouble fptemp
;
4201 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4205 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4206 /* the above code is for |arg5 < 2**63 only */
4210 void helper_fxam_ST0(void)
4212 CPU86_LDoubleU temp
;
4217 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4219 env
->fpus
|= 0x200; /* C1 <-- 1 */
4221 /* XXX: test fptags too */
4222 expdif
= EXPD(temp
);
4223 if (expdif
== MAXEXPD
) {
4224 #ifdef USE_X86LDOUBLE
4225 if (MANTD(temp
) == 0x8000000000000000ULL
)
4227 if (MANTD(temp
) == 0)
4229 env
->fpus
|= 0x500 /*Infinity*/;
4231 env
->fpus
|= 0x100 /*NaN*/;
4232 } else if (expdif
== 0) {
4233 if (MANTD(temp
) == 0)
4234 env
->fpus
|= 0x4000 /*Zero*/;
4236 env
->fpus
|= 0x4400 /*Denormal*/;
4242 void helper_fstenv(target_ulong ptr
, int data32
)
4244 int fpus
, fptag
, exp
, i
;
4248 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4250 for (i
=7; i
>=0; i
--) {
4252 if (env
->fptags
[i
]) {
4255 tmp
.d
= env
->fpregs
[i
].d
;
4258 if (exp
== 0 && mant
== 0) {
4261 } else if (exp
== 0 || exp
== MAXEXPD
4262 #ifdef USE_X86LDOUBLE
4263 || (mant
& (1LL << 63)) == 0
4266 /* NaNs, infinity, denormal */
4273 stl(ptr
, env
->fpuc
);
4275 stl(ptr
+ 8, fptag
);
4276 stl(ptr
+ 12, 0); /* fpip */
4277 stl(ptr
+ 16, 0); /* fpcs */
4278 stl(ptr
+ 20, 0); /* fpoo */
4279 stl(ptr
+ 24, 0); /* fpos */
4282 stw(ptr
, env
->fpuc
);
4284 stw(ptr
+ 4, fptag
);
4292 void helper_fldenv(target_ulong ptr
, int data32
)
4297 env
->fpuc
= lduw(ptr
);
4298 fpus
= lduw(ptr
+ 4);
4299 fptag
= lduw(ptr
+ 8);
4302 env
->fpuc
= lduw(ptr
);
4303 fpus
= lduw(ptr
+ 2);
4304 fptag
= lduw(ptr
+ 4);
4306 env
->fpstt
= (fpus
>> 11) & 7;
4307 env
->fpus
= fpus
& ~0x3800;
4308 for(i
= 0;i
< 8; i
++) {
4309 env
->fptags
[i
] = ((fptag
& 3) == 3);
4314 void helper_fsave(target_ulong ptr
, int data32
)
4319 helper_fstenv(ptr
, data32
);
4321 ptr
+= (14 << data32
);
4322 for(i
= 0;i
< 8; i
++) {
4324 helper_fstt(tmp
, ptr
);
4342 void helper_frstor(target_ulong ptr
, int data32
)
4347 helper_fldenv(ptr
, data32
);
4348 ptr
+= (14 << data32
);
4350 for(i
= 0;i
< 8; i
++) {
4351 tmp
= helper_fldt(ptr
);
4357 void helper_fxsave(target_ulong ptr
, int data64
)
4359 int fpus
, fptag
, i
, nb_xmm_regs
;
4363 /* The operand must be 16 byte aligned */
4365 raise_exception(EXCP0D_GPF
);
4368 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4370 for(i
= 0; i
< 8; i
++) {
4371 fptag
|= (env
->fptags
[i
] << i
);
4373 stw(ptr
, env
->fpuc
);
4375 stw(ptr
+ 4, fptag
^ 0xff);
4376 #ifdef TARGET_X86_64
4378 stq(ptr
+ 0x08, 0); /* rip */
4379 stq(ptr
+ 0x10, 0); /* rdp */
4383 stl(ptr
+ 0x08, 0); /* eip */
4384 stl(ptr
+ 0x0c, 0); /* sel */
4385 stl(ptr
+ 0x10, 0); /* dp */
4386 stl(ptr
+ 0x14, 0); /* sel */
4390 for(i
= 0;i
< 8; i
++) {
4392 helper_fstt(tmp
, addr
);
4396 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4397 /* XXX: finish it */
4398 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4399 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4400 if (env
->hflags
& HF_CS64_MASK
)
4405 /* Fast FXSAVE leaves out the XMM registers */
4406 if (!(env
->efer
& MSR_EFER_FFXSR
)
4407 || (env
->hflags
& HF_CPL_MASK
)
4408 || !(env
->hflags
& HF_LMA_MASK
)) {
4409 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4410 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4411 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4418 void helper_fxrstor(target_ulong ptr
, int data64
)
4420 int i
, fpus
, fptag
, nb_xmm_regs
;
4424 /* The operand must be 16 byte aligned */
4426 raise_exception(EXCP0D_GPF
);
4429 env
->fpuc
= lduw(ptr
);
4430 fpus
= lduw(ptr
+ 2);
4431 fptag
= lduw(ptr
+ 4);
4432 env
->fpstt
= (fpus
>> 11) & 7;
4433 env
->fpus
= fpus
& ~0x3800;
4435 for(i
= 0;i
< 8; i
++) {
4436 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4440 for(i
= 0;i
< 8; i
++) {
4441 tmp
= helper_fldt(addr
);
4446 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4447 /* XXX: finish it */
4448 env
->mxcsr
= ldl(ptr
+ 0x18);
4450 if (env
->hflags
& HF_CS64_MASK
)
4455 /* Fast FXRESTORE leaves out the XMM registers */
4456 if (!(env
->efer
& MSR_EFER_FFXSR
)
4457 || (env
->hflags
& HF_CPL_MASK
)
4458 || !(env
->hflags
& HF_LMA_MASK
)) {
4459 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4460 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4461 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4468 #ifndef USE_X86LDOUBLE
4470 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4472 CPU86_LDoubleU temp
;
4477 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4478 /* exponent + sign */
4479 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4480 e
|= SIGND(temp
) >> 16;
4484 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4486 CPU86_LDoubleU temp
;
4490 /* XXX: handle overflow ? */
4491 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4492 e
|= (upper
>> 4) & 0x800; /* sign */
4493 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4495 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4498 temp
.ll
= ll
| ((uint64_t)e
<< 52);
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
4524 #ifdef TARGET_X86_64
4526 //#define DEBUG_MULDIV
4528 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4537 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4541 add128(plow
, phigh
, 1, 0);
4544 /* return TRUE if overflow */
4545 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4547 uint64_t q
, r
, a1
, a0
;
4560 /* XXX: use a better algorithm */
4561 for(i
= 0; i
< 64; i
++) {
4563 a1
= (a1
<< 1) | (a0
>> 63);
4564 if (ab
|| a1
>= b
) {
4570 a0
= (a0
<< 1) | qb
;
4572 #if defined(DEBUG_MULDIV)
4573 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4574 *phigh
, *plow
, b
, a0
, a1
);
4582 /* return TRUE if overflow */
4583 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4586 sa
= ((int64_t)*phigh
< 0);
4588 neg128(plow
, phigh
);
4592 if (div64(plow
, phigh
, b
) != 0)
4595 if (*plow
> (1ULL << 63))
4599 if (*plow
>= (1ULL << 63))
4607 void helper_mulq_EAX_T0(target_ulong t0
)
4611 mulu64(&r0
, &r1
, EAX
, t0
);
4618 void helper_imulq_EAX_T0(target_ulong t0
)
4622 muls64(&r0
, &r1
, EAX
, t0
);
4626 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4629 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4633 muls64(&r0
, &r1
, t0
, t1
);
4635 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4639 void helper_divq_EAX(target_ulong t0
)
4643 raise_exception(EXCP00_DIVZ
);
4647 if (div64(&r0
, &r1
, t0
))
4648 raise_exception(EXCP00_DIVZ
);
4653 void helper_idivq_EAX(target_ulong t0
)
4657 raise_exception(EXCP00_DIVZ
);
4661 if (idiv64(&r0
, &r1
, t0
))
4662 raise_exception(EXCP00_DIVZ
);
4668 static void do_hlt(void)
4670 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4672 env
->exception_index
= EXCP_HLT
;
4676 void helper_hlt(int next_eip_addend
)
4678 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4679 EIP
+= next_eip_addend
;
4684 void helper_monitor(target_ulong ptr
)
4686 if ((uint32_t)ECX
!= 0)
4687 raise_exception(EXCP0D_GPF
);
4688 /* XXX: store address ? */
4689 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4692 void helper_mwait(int next_eip_addend
)
4694 if ((uint32_t)ECX
!= 0)
4695 raise_exception(EXCP0D_GPF
);
4696 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4697 EIP
+= next_eip_addend
;
4699 /* XXX: not complete but not completely erroneous */
4700 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4701 /* more than one CPU: do not sleep because another CPU may
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}
4796 #if !defined(CONFIG_USER_ONLY)
4798 #define MMUSUFFIX _mmu
4801 #include "softmmu_template.h"
4804 #include "softmmu_template.h"
4807 #include "softmmu_template.h"
4810 #include "softmmu_template.h"
4814 #if !defined(CONFIG_USER_ONLY)
4815 /* try to fill the TLB and return an exception if error. If retaddr is
4816 NULL, it means that the function was called in C code (i.e. not
4817 from generated code or from helper.c) */
4818 /* XXX: fix it to restore all registers */
4819 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4821 TranslationBlock
*tb
;
4824 CPUX86State
*saved_env
;
4826 /* XXX: hack to restore env in all cases, even if not called from
4829 env
= cpu_single_env
;
4831 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4834 /* now we have a real cpu fault */
4835 pc
= (unsigned long)retaddr
;
4836 tb
= tb_find_pc(pc
);
4838 /* the PC is inside the translated code. It means that we have
4839 a virtual CPU fault */
4840 cpu_restore_state(tb
, env
, pc
, NULL
);
4843 raise_exception_err(env
->exception_index
, env
->error_code
);
4849 /* Secure Virtual Machine helpers */
4851 #if defined(CONFIG_USER_ONLY)
4853 void helper_vmrun(int aflag
, int next_eip_addend
)
4856 void helper_vmmcall(void)
4859 void helper_vmload(int aflag
)
4862 void helper_vmsave(int aflag
)
4865 void helper_stgi(void)
4868 void helper_clgi(void)
4871 void helper_skinit(void)
4874 void helper_invlpga(int aflag
)
4877 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4880 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4884 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4885 uint32_t next_eip_addend
)
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
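/* Illustrative sketch (added commentary, not original code): the VMCB keeps
   segment attributes in the packed 12-bit SVM "attrib" format while the CPU
   state keeps them in the descriptor-style flags word, so svm_save_seg() and
   svm_load_seg() above just shift the two nibble groups back and forth: */
static inline uint16_t example_flags_to_vmcb_attrib(uint32_t flags)
{
    /* flags bits 15..8 -> attrib 7..0, flags bits 23..20 -> attrib 11..8 */
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static inline uint32_t example_vmcb_attrib_to_flags(uint16_t attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}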
4923 void helper_vmrun(int aflag
, int next_eip_addend
)
4929 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4934 addr
= (uint32_t)EAX
;
4936 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4938 env
->vm_vmcb
= addr
;
4940 /* save the current CPU state in the hsave page */
4941 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4942 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4944 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4945 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4947 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4948 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4949 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4950 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4951 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4952 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4954 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4955 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4957 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4959 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4961 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4963 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4966 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4967 EIP
+ next_eip_addend
);
4968 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4969 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4971 /* load the interception bitmaps so we do not need to access the
4973 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4974 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4975 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4976 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4977 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4978 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4980 /* enable intercepts */
4981 env
->hflags
|= HF_SVMI_MASK
;
4983 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4985 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4986 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4988 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4989 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4991 /* clear exit_info_2 so we behave like the real hardware */
4992 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4994 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4995 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4996 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4997 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4998 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4999 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
5000 if (int_ctl
& V_INTR_MASKING_MASK
) {
5001 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
5002 env
->hflags2
|= HF2_VINTR_MASK
;
5003 if (env
->eflags
& IF_MASK
)
5004 env
->hflags2
|= HF2_HIF_MASK
;
5008 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
5010 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
5011 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5012 CC_OP
= CC_OP_EFLAGS
;
5014 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5016 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5018 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5020 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5023 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
5025 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
5026 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
5027 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
5028 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
5029 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
5031 /* FIXME: guest state consistency checks */
5033 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
5034 case TLB_CONTROL_DO_NOTHING
:
5036 case TLB_CONTROL_FLUSH_ALL_ASID
:
5037 /* FIXME: this is not 100% correct but should work for now */
5042 env
->hflags2
|= HF2_GIF_MASK
;
5044 if (int_ctl
& V_IRQ_MASK
) {
5045 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
5048 /* maybe we need to inject an event */
5049 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
5050 if (event_inj
& SVM_EVTINJ_VALID
) {
5051 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
5052 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
5053 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
5055 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
5056 /* FIXME: need to implement valid_err */
5057 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5058 case SVM_EVTINJ_TYPE_INTR
:
5059 env
->exception_index
= vector
;
5060 env
->error_code
= event_inj_err
;
5061 env
->exception_is_int
= 0;
5062 env
->exception_next_eip
= -1;
5063 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5064 /* XXX: is it always correct ? */
5065 do_interrupt(vector
, 0, 0, 0, 1);
5067 case SVM_EVTINJ_TYPE_NMI
:
5068 env
->exception_index
= EXCP02_NMI
;
5069 env
->error_code
= event_inj_err
;
5070 env
->exception_is_int
= 0;
5071 env
->exception_next_eip
= EIP
;
5072 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5075 case SVM_EVTINJ_TYPE_EXEPT
:
5076 env
->exception_index
= vector
;
5077 env
->error_code
= event_inj_err
;
5078 env
->exception_is_int
= 0;
5079 env
->exception_next_eip
= -1;
5080 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5083 case SVM_EVTINJ_TYPE_SOFT
:
5084 env
->exception_index
= vector
;
5085 env
->error_code
= event_inj_err
;
5086 env
->exception_is_int
= 1;
5087 env
->exception_next_eip
= EIP
;
5088 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5092 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5096 void helper_vmmcall(void)
5098 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5099 raise_exception(EXCP06_ILLOP
);
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
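
/* Note on the MSR permission map lookup above: per the AMD SVM spec the MSRPM
   packs two intercept bits per MSR (read in the lower bit, write in the upper
   bit) in three 2K regions covering MSRs 0-0x1fff, 0xc0000000-0xc0001fff and
   0xc0010000-0xc0011fff. The code derives a byte offset t1 and a bit offset
   t0 into that map; param is expected to be 0 for a read access and 1 for a
   write, so (1 << param) << t0 tests the corresponding intercept bit. */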
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
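
/* Note on the I/O permission map check above: the IOPM holds one intercept
   bit per I/O port. Bits 4-6 of param carry the access-size flags of the
   IOIO exit information, so mask becomes a run of 1, 2 or 4 set bits, one
   per byte of the access; the 16-bit load at addr + port / 8 is wide enough
   for a check that straddles a byte boundary. If any covered bit is set,
   the intercept fires with the port number folded into the exit info. */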
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                  EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
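
/* MMX/SSE helpers. A brief note on the tag handling below: env->fptags keeps
   one byte per x87 register, with 1 meaning "empty". Entering MMX mode marks
   every register valid and resets the stack top, while EMMS tags all eight
   registers empty again (writing 0x01010101 twice covers the whole array). */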
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

/* Returns the number of leading zero bits of a wordsize-bit operand when
   wordsize > 0 (LZCNT semantics), or the bit index of the most significant
   set bit when wordsize == 0, as used by helper_bsr below. */
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
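
/* Lazy condition-code evaluation: the translator records the last
   flag-setting operation in CC_OP together with its operands in CC_SRC /
   CC_DST, and the helpers below materialize the EFLAGS bits (or just CF)
   only when they are actually needed, dispatching on CC_OP to the
   compute_all_* / compute_c_* routines generated from the templates above. */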
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();