/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
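/* Note (added for clarity): the x86 parity flag PF reflects the parity of
   the low 8 bits of a result and is set when that byte has an even number
   of 1 bits. For example, parity_table[0x03] == CC_P because 0x03 has two
   set bits, while parity_table[0x01] == 0 because 0x01 has only one. */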
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
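/* Note (added for clarity): RCL rotates through the carry flag, so a byte
   rotate cycles over 9 bit positions and a word rotate over 17. These
   tables reduce a 5-bit count modulo 9 (rclb) or 17 (rclw); e.g.
   rclb_table[10] == 1 because rotating a byte by 10 equals rotating by 1. */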
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /* pi */
    0.30102999566398119523L,  /* lg2 */
    0.69314718055994530943L,  /* ln2 */
    1.44269504088896340739L,  /* l2e */
    3.32192809488736234781L,  /* l2t */
};
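/* Note (added for clarity): these values back the x87 constant-load
   instructions (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T);
   the exact index-to-instruction mapping is done by the FPU helpers
   elsewhere in this file. */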
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
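/* Note (added for clarity): a descriptor scatters the 32-bit segment base
   across both words: bits 15..0 live in e1[31:16], bits 23..16 in e2[7:0],
   and bits 31..24 in e2[31:24], which is exactly what get_seg_base()
   stitches back together. */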
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
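/* Note (added for clarity): a 32-bit TSS keeps one SS:ESP pair per
   privilege level, ESPn at offset 4 + 8 * n and SSn at 8 + 8 * n; a
   16-bit TSS uses 4-byte strides starting at offset 2. The expression
   (dpl * 4 + 2) << shift above yields both layouts, e.g. dpl = 0 with a
   32-bit TSS (shift = 1) gives offset 4, the ESP0 slot. */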
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
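/* Note (added for clarity): the sequence above mirrors a hardware task
   switch: validate the new TSS, save the outgoing context into the old
   TSS, record the back link and set NT for CALL-style switches, update
   the descriptor busy bits, then load CR3, EFLAGS, the general registers,
   the LDT and finally the segment registers, which may fault in the new
   task's context. */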
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
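/* Note (added for clarity): the I/O permission bitmap is addressed via the
   16-bit base stored at TSS offset 0x66. As an illustrative example, a
   one-byte access to port 0x3f9 reads the bitmap word at
   bitmap_base + (0x3f9 >> 3) and the bit at position (0x3f9 & 7) must be
   zero for the access to be allowed. */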
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
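/* Note (added for clarity): the frame pushed above is, from higher to
   lower addresses: [GS FS DS ES when leaving vm86] [SS ESP when changing
   stack] EFLAGS CS EIP [error code], with 4-byte slots for 386 gates
   (shift == 1) and 2-byte slots for 286 gates. */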
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
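/* Note (added for clarity): in the 64-bit TSS, RSP0..RSP2 sit at offsets
   4, 12 and 20 and IST1..IST7 at offsets 36..84, so "8 * level + 4"
   covers both ranges; callers pass level = ist + 3 to address an IST
   slot, e.g. IST1 maps to offset 8 * 4 + 4 = 36. */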
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
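/* Note (added for clarity): SYSCALL takes the new CS selector from
   STAR[47:32] and uses CS + 8 for SS. In long mode it also saves the
   return RIP in RCX and RFLAGS in R11, clears the RFLAGS bits listed in
   SFMASK (env->fmask), and fetches the target from LSTAR (64-bit code) or
   CSTAR (compatibility mode); legacy mode jumps to the low 32 bits of
   STAR. */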
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
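/* Note (added for clarity): the real-mode IVT is an array of 4-byte
   entries, a 16-bit offset followed by a 16-bit segment, so e.g. INT 0x10
   fetches its vector from linear address 0x10 * 4 = 0x40. */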
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
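/* Note (added for clarity): the "contributory" class consists of #DE (0)
   and vectors 10-13 (#TS, #NP, #SS, #GP). A contributory fault while
   delivering a contributory one, or any of these (or #PF) while
   delivering a #PF, escalates to #DF (8); a further fault during #DF is
   the triple fault handled above. */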
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
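/* Note (added for clarity): x86 raises #DE not only for a zero divisor
   but also when the quotient overflows the destination register, which is
   why the helpers above range-check the quotient. For example, dividing
   DX:AX = 0x0001:0x0000 by 1 would need a 17-bit quotient and faults. */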
/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
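/* Note (added for clarity): DAA corrects a packed-BCD addition one nibble
   at a time. For example, after 0x15 + 0x27 = 0x3c the low nibble exceeds
   9, so 6 is added to give 0x42, the BCD encoding of 15 + 27 = 42. */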
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
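/* Note (added for clarity): CMPXCHG8B compares EDX:EAX with the 64-bit
   memory operand; on a match it stores ECX:EBX and sets ZF, otherwise it
   loads the operand into EDX:EAX and clears ZF. The store in the failure
   path mirrors the unconditional locked write-back the real instruction
   performs. */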
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
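
/* Illustrative sketch, not compiled (kept under #if 0 like the other
   debug snippets in this file): the stack frame produced by the two
   16-bit pushes above when shift == 0. The helper name is hypothetical. */
#if 0
#include <stdint.h>
#include <stdio.h>

static void show_far_call_frame(uint16_t cs, uint16_t ip, uint16_t sp)
{
    /* CS is pushed first, then the return IP, each 2 bytes */
    uint16_t new_sp = sp - 4;
    printf("[%04x] = %04x (return IP)\n", new_sp, ip);
    printf("[%04x] = %04x (return CS)\n", (uint16_t)(new_sp + 2), cs);
}
#endif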
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
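
/* Illustrative sketch, not compiled: the commented-out push_size formula
   above gives the number of bytes the call gate pushes on the inner
   stack (SS, (E)SP, the copied parameters, CS, (E)IP). The function
   name is hypothetical. */
#if 0
static int call_gate_push_size(int param_count, int shift)
{
    /* each entry is 2 bytes for a 16-bit gate (shift == 0) and
       4 bytes for a 32-bit gate (shift == 1) */
    return ((param_count * 2) + 8) << shift;
}
#endif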
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        ESP = ECX;
        EIP = EDX;
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        ESP = (uint32_t)ECX;
        EIP = (uint32_t)EDX;
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
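
/* Worked example of the masking above, illustrative only: the helper
   name is hypothetical and the block is not compiled. */
#if 0
#include <assert.h>

static void lmsw_mask_example(void)
{
    unsigned long cr0 = 0x11;                    /* PE | ET */
    unsigned long t0  = 0x8;                     /* TS requested */
    assert(((cr0 & ~0xe) | (t0 & 0xf)) == 0x19); /* PE kept, TS set */
    t0 = 0x0;                                    /* try to clear PE */
    assert(((cr0 & ~0xe) | (t0 & 0xf)) == 0x11); /* PE stays set */
}
#endif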
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
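
/* Illustrative only (not compiled): the EDX:EAX split used by rdtsc
   above and by the MSR helpers below. The function name is
   hypothetical. */
#if 0
#include <assert.h>
#include <stdint.h>

static void tsc_split_example(void)
{
    uint64_t val = 0x0123456789abcdefULL;
    assert((uint32_t)val == 0x89abcdefU);         /* goes to EAX */
    assert((uint32_t)(val >> 32) == 0x01234567U); /* goes to EDX */
}
#endif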
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef CONFIG_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}
/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
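
/* Illustrative only (not compiled): fnstsw above splices the TOP field
   (fpstt) into bits 13..11 of the status word. The function name is
   hypothetical. */
#if 0
#include <assert.h>
#include <stdint.h>

static void fnstsw_pack_example(void)
{
    uint32_t fpus = 0x0081;  /* some sticky exception bits */
    uint32_t fpstt = 5;      /* current top-of-stack index */
    uint32_t sw = (fpus & ~0x3800) | (fpstt & 0x7) << 11;
    assert(sw == 0x2881);    /* 0x0081 | (5 << 11) */
}
#endif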
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
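
/* Illustrative only (not compiled): the RC field decoded by
   update_fp_status() above sits in bits 11..10 of the control word.
   This assumes the usual x87 encoding (RC_MASK == 0xc00); the function
   name is hypothetical. */
#if 0
#include <assert.h>

static void rc_field_example(void)
{
    assert((0x037f & 0xc00) == 0x000); /* fninit default: round nearest */
    assert((0x0c7f & 0xc00) == 0xc00); /* round toward zero (chop) */
}
#endif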
/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
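
/* Illustrative only (not compiled): one packed-BCD byte as produced by
   the loop above holds two decimal digits, low digit in the low nibble.
   The function name is hypothetical. */
#if 0
#include <assert.h>

static void bcd_byte_example(void)
{
    int v = 73;                      /* two decimal digits */
    v = ((v / 10) << 4) | (v % 10);  /* same transform as helper_fbst */
    assert(v == 0x73);
}
#endif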
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);	 /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
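
/* Illustrative only (not compiled): how the three low quotient bits are
   spliced into the status word by fprem/fprem1 above (C0 is bit 8,
   C3 is bit 14, C1 is bit 9). The function name is hypothetical. */
#if 0
#include <assert.h>

static void fprem_cc_example(void)
{
    long long q = 5;               /* q2 q1 q0 = 1 0 1 */
    int fpus = 0;
    fpus |= (q & 0x4) << (8 - 2);  /* C0 <-- q2 */
    fpus |= (q & 0x2) << (14 - 1); /* C3 <-- q1 */
    fpus |= (q & 0x1) << (9 - 0);  /* C1 <-- q0 */
    assert(fpus == ((1 << 8) | (1 << 9)));
}
#endif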
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
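
/* Illustrative only (not compiled), for the !USE_X86LDOUBLE variants
   above: the 80-bit value 1.0 carries an explicit integer bit, so it is
   mant = 1ULL << 63 with upper = 0x3fff (biased exponent 16383, sign 0).
   With EXPBIAS == 1023, cpu_set_fp80 maps that pair back to the IEEE
   double 0x3ff0000000000000, i.e. 1.0. */
#if 0
#include <stdint.h>

static void fp80_one_example(void)
{
    uint64_t mant = 1ULL << 63;  /* integer bit set, fraction zero */
    uint16_t upper = 0x3fff;     /* exponent for 1.0, sign clear */
    /* cpu_set_fp80(mant, upper) == 1.0 under the assumptions above */
    (void)mant; (void)upper;
}
#endif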
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        /* shift-and-subtract restoring division, one bit per step */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif
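
/* Illustrative only (not compiled): div64 above divides the 128-bit
   value (*phigh:*plow) by b, leaving the quotient in *plow and the
   remainder in *phigh, and returns nonzero on overflow. The function
   name and the worked figures are for documentation only. */
#if 0
#include <stdint.h>

static void div64_example(void)
{
    uint64_t low = 7, high = 1;   /* value = 2^64 + 7 */
    /* div64(&low, &high, 3) would leave
       low  == 0x5555555555555557ULL  (quotient) and
       high == 2                      (remainder),
       since 3 * 0x5555555555555557 + 2 == 2^64 + 7. */
    (void)low; (void)high;
}
#endif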
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif
/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
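
/* Illustrative only (not compiled): the attrib<->flags transforms in
   svm_save_seg / svm_load_seg above are inverses for the bits the VMCB
   carries. The function name and sample value are hypothetical. */
#if 0
#include <assert.h>
#include <stdint.h>

static void vmcb_attrib_roundtrip(void)
{
    uint32_t flags = 0x00c09b00;          /* a typical code segment */
    uint16_t attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
    uint32_t back = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
    assert(back == (flags & 0x00f0ff00)); /* VMCB-visible bits survive */
}
#endif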
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
5106 void helper_vmmcall(void)
5108 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5109 raise_exception(EXCP06_ILLOP
);

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
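
/* helper_vmsave is the exact mirror of helper_vmload above: the same
   set of hidden segment registers and MSRs is written back to the
   VMCB addressed by rAX. */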

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
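
/* STGI/CLGI set and clear the Global Interrupt Flag, which gates all
   interrupt sources while a hypervisor switches worlds; it is
   modelled here by the HF2_GIF_MASK bit in hflags2. */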

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
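
/* INVLPGA invalidates the TLB mapping for the virtual address in rAX
   under the ASID given in ECX.  ASIDs are not modelled here, hence the
   unconditional single-page flush below. */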

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
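
/* Central intercept check, called before an interceptable instruction
   or event is emulated.  The CR/DR/exception intercepts are plain
   bitmaps indexed by the distance from the base exit code.  MSR
   accesses consult the MSR permission map instead: two bits per MSR
   (read, then write), packed into consecutive regions covering the
   0x0000xxxx, 0xC000xxxx and 0xC001xxxx ranges; param selects the
   read (0) or write (1) bit.  E.g. MSR 0xc0000080 (EFER) lands at bit
   (8192 + 0x80) * 2 = 16640, i.e. byte 2080, bit 0 of the map. */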

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
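
/* I/O intercepts use the I/O permission map: one bit per port.  Bits
   4..6 of param carry the access size in bytes (per the SVM IOIO
   exit-info format), and mask widens the test so an access that
   spills into the following port(s) is also caught.  On a hit, the
   address of the next instruction is recorded in exit_info_2 so the
   host can resume the guest behind the insn. */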

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                         control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
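
/* #VMEXIT, the counterpart of vmrun: the interrupt shadow and the
   complete guest state are written back to the VMCB, the host state
   is reloaded from vm_hsave, the exit code and exit information are
   recorded for the host, GIF is cleared, and cpu_loop_exit() unwinds
   back to the main execution loop -- this helper never returns to
   its caller. */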

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64
                  ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
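
/* fptags[] keeps one byte per x87 register: 0 means valid, 1 means
   empty.  MMX instructions need the whole register file marked valid
   with the top of stack at 0, while EMMS returns every register to
   the empty state; the two 32-bit stores below touch all eight tags
   at once. */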
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
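
/* The template headers below are included several times with a
   different SHIFT each time: for ops_sse.h, SHIFT 0 generates the MMX
   (64-bit) and SHIFT 1 the SSE (128-bit) variants; for
   helper_template.h, SHIFT 0..3 generate the byte, word, long and (on
   x86-64) quad versions of the size-parameterised helpers.  One
   template thus yields the whole family of functions. */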

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
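
/* Lazy condition codes: rather than computing EFLAGS after every
   instruction, the translator only records the kind of the last
   flag-setting operation in CC_OP together with its operands in
   CC_SRC/CC_DST.  The dispatchers below reconstruct the flags on
   demand: helper_cc_compute_all produces the complete set, while
   helper_cc_compute_c computes only CF, which is all that consumers
   like ADC/SBB need. */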

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
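
/* CF-only variant of the above.  Several cases share one
   implementation: the carry of a multiply and of an arithmetic right
   shift is computed from CC_SRC the same way at every operand width,
   and INC/DEC do not modify CF at all, so compute_c_incl simply
   returns the CF value the translator saved in CC_SRC. */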

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();