/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "dyngen-exec.h"
#include "host-utils.h"
#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
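/* lshift() shifts left for a non-negative count and right by the magnitude
 * for a negative one.  A minimal sketch of the body (assumed, not verbatim):
 *
 *     if (n >= 0)
 *         return x << n;
 *     else
 *         return x >> (-n);
 */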
#define MAXTAN 9223372036854775808.0

/* the following deal with x86 long double-precision numbers */
#define MAXEXPD 0x7fff
#define EXPD(fp)        (fp.l.upper & 0x7fff)
#define SIGND(fp)       ((fp.l.upper) & 0x8000)
#define MANTD(fp)       (fp.l.lower)
#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
static inline void fpush(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fptags[env->fpstt] = 0; /* validate stack entry */
}
static inline void fpop(void)
{
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
}
static inline floatx80 helper_fldt(target_ulong ptr)
{
    CPU86_LDoubleU temp;

    temp.l.lower = ldq(ptr);
    temp.l.upper = lduw(ptr + 8);
    return temp.d;
}
static inline void helper_fstt(floatx80 f, target_ulong ptr)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    stq(ptr, temp.l.lower);
    stw(ptr + 8, temp.l.upper);
}
#define FPUS_IE (1 << 0)
#define FPUS_DE (1 << 1)
#define FPUS_ZE (1 << 2)
#define FPUS_OE (1 << 3)
#define FPUS_UE (1 << 4)
#define FPUS_PE (1 << 5)
#define FPUS_SF (1 << 6)
#define FPUS_SE (1 << 7)
#define FPUS_B  (1 << 15)
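/* These are the x86 FPU status-word bits: the six exception flags (invalid
 * operation, denormal, zero divide, overflow, underflow, precision), the
 * stack-fault flag, the error-summary flag and the busy flag.  A status word
 * of FPUS_IE | FPUS_SF, for example, reports the stack-fault form of the
 * invalid-operation exception. */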
static inline uint32_t compute_eflags(void)
{
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
}
/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void load_eflags(int eflags, int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}
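/* compute_eflags()/load_eflags() translate between the architectural EFLAGS
 * image and QEMU's split representation (lazily evaluated CC_* flags, DF held
 * separately, the rest in env->eflags).  An illustrative round trip, assuming
 * a valid env -- a sketch, not code taken from this file:
 *
 *     uint32_t flags = compute_eflags();
 *     load_eflags(flags | CC_C, CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 *     CC_OP = CC_OP_EFLAGS;   // required after load_eflags, per the note above
 */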
/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUState *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code);
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
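/* The table above encodes the PF flag for every possible 8-bit result:
 * entry i is CC_P when i contains an even number of set bits and 0 otherwise,
 * so flag computation reduces to "eflags |= parity_table[result & 0xff];"
 * as done by the BCD helpers later in this file. */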
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
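/* These tables reduce the shift count for RCL/RCR on 8- and 16-bit operands:
 * a rotate through carry cycles with period width+1 (9 for bytes, 17 for
 * words), so the count can be looked up modulo 9 or 17 instead of looping. */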
#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;

    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
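/* e1/e2 are the low and high 32-bit halves of a segment descriptor: e1 holds
 * limit bits 15..0 and base bits 15..0, while e2 holds base bits 23..16 and
 * 31..24, the attribute flags and limit bits 19..16.  get_seg_base() stitches
 * the three base fields together, and get_seg_limit() expands the limit to
 * byte granularity (shifted by 12 with 0xfff filled in) when DESC_G_MASK is
 * set. */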
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
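/* For reference: with a 32-bit TSS (shift == 1) the privilege-level stacks
 * live at offset 4 + 8 * dpl (ESPn) followed by SSn, while a 16-bit TSS
 * (shift == 0) keeps SP/SS pairs every 4 bytes starting at offset 2, which is
 * exactly what index = (dpl * 4 + 2) << shift computes above. */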
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
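/* tss_load_seg() is used by switch_tss() below to reload the segment
 * registers of the incoming task; any descriptor inconsistency raises #TS
 * (EXCP0A_TSS) with the offending selector rather than #GP. */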
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */

    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
    }
#endif
}
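/* switch_tss() is invoked with SWITCH_TSS_JMP, SWITCH_TSS_IRET or
 * SWITCH_TSS_CALL.  As the code above shows, JMP and IRET clear the busy bit
 * of the outgoing TSS descriptor, JMP and CALL set it on the incoming one,
 * and CALL additionally stores the old TR selector in the new TSS backlink
 * and sets NT so that a later IRET can return to the nested task. */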
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
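/* The I/O permission bitmap starts at the 16-bit offset stored at TSS+0x66;
 * each I/O port is represented by one bit.  An access of `size` bytes is
 * allowed only if all `size` bits starting at the port number are clear, and
 * two bytes are read because those bits may straddle a byte boundary. */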
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
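/* exeption_has_error_code() (note the historical spelling) returns non-zero
 * for the vectors that push an error code: 8 (#DF), 10 (#TS), 11 (#NP),
 * 12 (#SS), 13 (#GP), 14 (#PF) and 17 (#AC).  This matches standard x86
 * behaviour; the exact body is assumed here rather than shown. */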
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
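/* These macros push to / pop from the guest stack given its segment base
 * (ssp), the stack pointer variable (sp, updated in place) and the mask that
 * limits it to 16 or 32 bits.  Typical use, as in the interrupt code below:
 *
 *     PUSHL(ssp, esp, sp_mask, error_code);
 *     SET_ESP(esp, sp_mask);
 */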
759 /* protected mode interrupt */
760 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
761 unsigned int next_eip
, int is_hw
)
764 target_ulong ptr
, ssp
;
765 int type
, dpl
, selector
, ss_dpl
, cpl
;
766 int has_error_code
, new_stack
, shift
;
767 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
768 uint32_t old_eip
, sp_mask
;
771 if (!is_int
&& !is_hw
)
772 has_error_code
= exeption_has_error_code(intno
);
779 if (intno
* 8 + 7 > dt
->limit
)
780 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
781 ptr
= dt
->base
+ intno
* 8;
782 e1
= ldl_kernel(ptr
);
783 e2
= ldl_kernel(ptr
+ 4);
784 /* check gate type */
785 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
787 case 5: /* task gate */
788 /* must do that check here to return the correct error code */
789 if (!(e2
& DESC_P_MASK
))
790 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
791 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
792 if (has_error_code
) {
795 /* push the error code */
796 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
798 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
802 esp
= (ESP
- (2 << shift
)) & mask
;
803 ssp
= env
->segs
[R_SS
].base
+ esp
;
805 stl_kernel(ssp
, error_code
);
807 stw_kernel(ssp
, error_code
);
811 case 6: /* 286 interrupt gate */
812 case 7: /* 286 trap gate */
813 case 14: /* 386 interrupt gate */
814 case 15: /* 386 trap gate */
817 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
820 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
821 cpl
= env
->hflags
& HF_CPL_MASK
;
822 /* check privilege if software int */
823 if (is_int
&& dpl
< cpl
)
824 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
825 /* check valid bit */
826 if (!(e2
& DESC_P_MASK
))
827 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
829 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
830 if ((selector
& 0xfffc) == 0)
831 raise_exception_err(EXCP0D_GPF
, 0);
833 if (load_segment(&e1
, &e2
, selector
) != 0)
834 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
835 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
836 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
837 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
839 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
840 if (!(e2
& DESC_P_MASK
))
841 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
842 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
843 /* to inner privilege */
844 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
845 if ((ss
& 0xfffc) == 0)
846 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
848 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
849 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
850 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
851 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
853 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
854 if (!(ss_e2
& DESC_S_MASK
) ||
855 (ss_e2
& DESC_CS_MASK
) ||
856 !(ss_e2
& DESC_W_MASK
))
857 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
858 if (!(ss_e2
& DESC_P_MASK
))
859 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
861 sp_mask
= get_sp_mask(ss_e2
);
862 ssp
= get_seg_base(ss_e1
, ss_e2
);
863 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
864 /* to same privilege */
865 if (env
->eflags
& VM_MASK
)
866 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
868 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
869 ssp
= env
->segs
[R_SS
].base
;
873 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
874 new_stack
= 0; /* avoid warning */
875 sp_mask
= 0; /* avoid warning */
876 ssp
= 0; /* avoid warning */
877 esp
= 0; /* avoid warning */
883 /* XXX: check that enough room is available */
884 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
885 if (env
->eflags
& VM_MASK
)
891 if (env
->eflags
& VM_MASK
) {
892 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
893 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
894 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
895 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
897 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
898 PUSHL(ssp
, esp
, sp_mask
, ESP
);
900 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
901 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
902 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
903 if (has_error_code
) {
904 PUSHL(ssp
, esp
, sp_mask
, error_code
);
908 if (env
->eflags
& VM_MASK
) {
909 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
910 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
911 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
912 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
914 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
915 PUSHW(ssp
, esp
, sp_mask
, ESP
);
917 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
918 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
919 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
920 if (has_error_code
) {
921 PUSHW(ssp
, esp
, sp_mask
, error_code
);
926 if (env
->eflags
& VM_MASK
) {
927 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
928 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
929 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
930 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
932 ss
= (ss
& ~3) | dpl
;
933 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
934 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
936 SET_ESP(esp
, sp_mask
);
938 selector
= (selector
& ~3) | dpl
;
939 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
940 get_seg_base(e1
, e2
),
941 get_seg_limit(e1
, e2
),
943 cpu_x86_set_cpl(env
, dpl
);
946 /* interrupt gate clear IF mask */
947 if ((type
& 1) == 0) {
948 env
->eflags
&= ~IF_MASK
;
950 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
955 #define PUSHQ(sp, val)\
958 stq_kernel(sp, (val));\
961 #define POPQ(sp, val)\
963 val = ldq_kernel(sp);\
967 static inline target_ulong
get_rsp_from_tss(int level
)
972 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
973 env
->tr
.base
, env
->tr
.limit
);
976 if (!(env
->tr
.flags
& DESC_P_MASK
))
977 cpu_abort(env
, "invalid tss");
978 index
= 8 * level
+ 4;
979 if ((index
+ 7) > env
->tr
.limit
)
980 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
981 return ldq_kernel(env
->tr
.base
+ index
);
984 /* 64 bit interrupt */
985 static void do_interrupt64(int intno
, int is_int
, int error_code
,
986 target_ulong next_eip
, int is_hw
)
990 int type
, dpl
, selector
, cpl
, ist
;
991 int has_error_code
, new_stack
;
992 uint32_t e1
, e2
, e3
, ss
;
993 target_ulong old_eip
, esp
, offset
;
996 if (!is_int
&& !is_hw
)
997 has_error_code
= exeption_has_error_code(intno
);
1004 if (intno
* 16 + 15 > dt
->limit
)
1005 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
1006 ptr
= dt
->base
+ intno
* 16;
1007 e1
= ldl_kernel(ptr
);
1008 e2
= ldl_kernel(ptr
+ 4);
1009 e3
= ldl_kernel(ptr
+ 8);
1010 /* check gate type */
1011 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1013 case 14: /* 386 interrupt gate */
1014 case 15: /* 386 trap gate */
1017 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
1020 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1021 cpl
= env
->hflags
& HF_CPL_MASK
;
1022 /* check privilege if software int */
1023 if (is_int
&& dpl
< cpl
)
1024 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
1025 /* check valid bit */
1026 if (!(e2
& DESC_P_MASK
))
1027 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
1028 selector
= e1
>> 16;
1029 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1031 if ((selector
& 0xfffc) == 0)
1032 raise_exception_err(EXCP0D_GPF
, 0);
1034 if (load_segment(&e1
, &e2
, selector
) != 0)
1035 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1036 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
1037 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1038 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1040 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1041 if (!(e2
& DESC_P_MASK
))
1042 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1043 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
1044 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1045 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
1046 /* to inner privilege */
1048 esp
= get_rsp_from_tss(ist
+ 3);
1050 esp
= get_rsp_from_tss(dpl
);
1051 esp
&= ~0xfLL
; /* align stack */
1054 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
1055 /* to same privilege */
1056 if (env
->eflags
& VM_MASK
)
1057 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1060 esp
= get_rsp_from_tss(ist
+ 3);
1063 esp
&= ~0xfLL
; /* align stack */
1066 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1067 new_stack
= 0; /* avoid warning */
1068 esp
= 0; /* avoid warning */
1071 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
1073 PUSHQ(esp
, compute_eflags());
1074 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
1075 PUSHQ(esp
, old_eip
);
1076 if (has_error_code
) {
1077 PUSHQ(esp
, error_code
);
1082 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
1086 selector
= (selector
& ~3) | dpl
;
1087 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1088 get_seg_base(e1
, e2
),
1089 get_seg_limit(e1
, e2
),
1091 cpu_x86_set_cpl(env
, dpl
);
1094 /* interrupt gate clear IF mask */
1095 if ((type
& 1) == 0) {
1096 env
->eflags
&= ~IF_MASK
;
1098 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1102 #ifdef TARGET_X86_64
1103 #if defined(CONFIG_USER_ONLY)
1104 void helper_syscall(int next_eip_addend
)
1106 env
->exception_index
= EXCP_SYSCALL
;
1107 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1111 void helper_syscall(int next_eip_addend
)
1115 if (!(env
->efer
& MSR_EFER_SCE
)) {
1116 raise_exception_err(EXCP06_ILLOP
, 0);
1118 selector
= (env
->star
>> 32) & 0xffff;
1119 if (env
->hflags
& HF_LMA_MASK
) {
1122 ECX
= env
->eip
+ next_eip_addend
;
1123 env
->regs
[11] = compute_eflags();
1125 code64
= env
->hflags
& HF_CS64_MASK
;
1127 cpu_x86_set_cpl(env
, 0);
1128 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1130 DESC_G_MASK
| DESC_P_MASK
|
1132 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1133 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1135 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1137 DESC_W_MASK
| DESC_A_MASK
);
1138 env
->eflags
&= ~env
->fmask
;
1139 load_eflags(env
->eflags
, 0);
1141 env
->eip
= env
->lstar
;
1143 env
->eip
= env
->cstar
;
1145 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1147 cpu_x86_set_cpl(env
, 0);
1148 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1150 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1152 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1153 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1155 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1157 DESC_W_MASK
| DESC_A_MASK
);
1158 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1159 env
->eip
= (uint32_t)env
->star
;
1165 #ifdef TARGET_X86_64
1166 void helper_sysret(int dflag
)
1170 if (!(env
->efer
& MSR_EFER_SCE
)) {
1171 raise_exception_err(EXCP06_ILLOP
, 0);
1173 cpl
= env
->hflags
& HF_CPL_MASK
;
1174 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1175 raise_exception_err(EXCP0D_GPF
, 0);
1177 selector
= (env
->star
>> 48) & 0xffff;
1178 if (env
->hflags
& HF_LMA_MASK
) {
1180 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1182 DESC_G_MASK
| DESC_P_MASK
|
1183 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1184 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1188 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1190 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1191 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1192 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1193 env
->eip
= (uint32_t)ECX
;
1195 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1197 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1198 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1199 DESC_W_MASK
| DESC_A_MASK
);
1200 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1201 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1202 cpu_x86_set_cpl(env
, 3);
1204 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1206 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1207 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1208 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1209 env
->eip
= (uint32_t)ECX
;
1210 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1212 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1213 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1214 DESC_W_MASK
| DESC_A_MASK
);
1215 env
->eflags
|= IF_MASK
;
1216 cpu_x86_set_cpl(env
, 3);
1221 /* real mode interrupt */
1222 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1223 unsigned int next_eip
)
1226 target_ulong ptr
, ssp
;
1228 uint32_t offset
, esp
;
1229 uint32_t old_cs
, old_eip
;
1231 /* real mode (simpler !) */
1233 if (intno
* 4 + 3 > dt
->limit
)
1234 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1235 ptr
= dt
->base
+ intno
* 4;
1236 offset
= lduw_kernel(ptr
);
1237 selector
= lduw_kernel(ptr
+ 2);
1239 ssp
= env
->segs
[R_SS
].base
;
1244 old_cs
= env
->segs
[R_CS
].selector
;
1245 /* XXX: use SS segment size ? */
1246 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1247 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1248 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1250 /* update processor state */
1251 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1253 env
->segs
[R_CS
].selector
= selector
;
1254 env
->segs
[R_CS
].base
= (selector
<< 4);
1255 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1258 #if defined(CONFIG_USER_ONLY)
1259 /* fake user mode interrupt */
1260 static void do_interrupt_user(int intno
, int is_int
, int error_code
,
1261 target_ulong next_eip
)
1265 int dpl
, cpl
, shift
;
1269 if (env
->hflags
& HF_LMA_MASK
) {
1274 ptr
= dt
->base
+ (intno
<< shift
);
1275 e2
= ldl_kernel(ptr
+ 4);
1277 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1278 cpl
= env
->hflags
& HF_CPL_MASK
;
1279 /* check privilege if software int */
1280 if (is_int
&& dpl
< cpl
)
1281 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1283 /* Since we emulate only user space, we cannot do more than
1284 exiting the emulation with the suitable exception and error
1292 static void handle_even_inj(int intno
, int is_int
, int error_code
,
1295 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1296 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1299 type
= SVM_EVTINJ_TYPE_SOFT
;
1301 type
= SVM_EVTINJ_TYPE_EXEPT
;
1302 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1303 if (!rm
&& exeption_has_error_code(intno
)) {
1304 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1305 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
), error_code
);
1307 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
);
1313 * Begin execution of an interruption. is_int is TRUE if coming from
1314 * the int instruction. next_eip is the EIP value AFTER the interrupt
1315 * instruction. It is only relevant if is_int is TRUE.
1317 static void do_interrupt_all(int intno
, int is_int
, int error_code
,
1318 target_ulong next_eip
, int is_hw
)
1320 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1321 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1323 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1324 count
, intno
, error_code
, is_int
,
1325 env
->hflags
& HF_CPL_MASK
,
1326 env
->segs
[R_CS
].selector
, EIP
,
1327 (int)env
->segs
[R_CS
].base
+ EIP
,
1328 env
->segs
[R_SS
].selector
, ESP
);
1329 if (intno
== 0x0e) {
1330 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1332 qemu_log(" EAX=" TARGET_FMT_lx
, EAX
);
1335 log_cpu_state(env
, X86_DUMP_CCOP
);
1341 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1342 for(i
= 0; i
< 16; i
++) {
1343 qemu_log(" %02x", ldub(ptr
+ i
));
1351 if (env
->cr
[0] & CR0_PE_MASK
) {
1352 #if !defined(CONFIG_USER_ONLY)
1353 if (env
->hflags
& HF_SVMI_MASK
)
1354 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 0);
1356 #ifdef TARGET_X86_64
1357 if (env
->hflags
& HF_LMA_MASK
) {
1358 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1362 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1365 #if !defined(CONFIG_USER_ONLY)
1366 if (env
->hflags
& HF_SVMI_MASK
)
1367 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 1);
1369 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1372 #if !defined(CONFIG_USER_ONLY)
1373 if (env
->hflags
& HF_SVMI_MASK
) {
1374 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1375 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
1380 void do_interrupt(CPUState
*env1
)
1382 CPUState
*saved_env
;
1386 #if defined(CONFIG_USER_ONLY)
1387 /* if user mode only, we simulate a fake exception
1388 which will be handled outside the cpu execution
1390 do_interrupt_user(env
->exception_index
,
1391 env
->exception_is_int
,
1393 env
->exception_next_eip
);
1394 /* successfully delivered */
1395 env
->old_exception
= -1;
1397 /* simulate a real cpu exception. On i386, it can
1398 trigger new exceptions, but we do not handle
1399 double or triple faults yet. */
1400 do_interrupt_all(env
->exception_index
,
1401 env
->exception_is_int
,
1403 env
->exception_next_eip
, 0);
1404 /* successfully delivered */
1405 env
->old_exception
= -1;
1410 void do_interrupt_x86_hardirq(CPUState
*env1
, int intno
, int is_hw
)
1412 CPUState
*saved_env
;
1416 do_interrupt_all(intno
, 0, 0, 0, is_hw
);
1420 /* This should come from sysemu.h - if we could include it here... */
1421 void qemu_system_reset_request(void);
1424 * Check nested exceptions and change to double or triple fault if
1425 * needed. It should only be called, if this is not an interrupt.
1426 * Returns the new exception number.
1428 static int check_exception(int intno
, int *error_code
)
1430 int first_contributory
= env
->old_exception
== 0 ||
1431 (env
->old_exception
>= 10 &&
1432 env
->old_exception
<= 13);
1433 int second_contributory
= intno
== 0 ||
1434 (intno
>= 10 && intno
<= 13);
1436 qemu_log_mask(CPU_LOG_INT
, "check_exception old: 0x%x new 0x%x\n",
1437 env
->old_exception
, intno
);
1439 #if !defined(CONFIG_USER_ONLY)
1440 if (env
->old_exception
== EXCP08_DBLE
) {
1441 if (env
->hflags
& HF_SVMI_MASK
)
1442 helper_vmexit(SVM_EXIT_SHUTDOWN
, 0); /* does not return */
1444 qemu_log_mask(CPU_LOG_RESET
, "Triple fault\n");
1446 qemu_system_reset_request();
1451 if ((first_contributory
&& second_contributory
)
1452 || (env
->old_exception
== EXCP0E_PAGE
&&
1453 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1454 intno
= EXCP08_DBLE
;
1458 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1459 (intno
== EXCP08_DBLE
))
1460 env
->old_exception
= intno
;
1466 * Signal an interruption. It is executed in the main CPU loop.
1467 * is_int is TRUE if coming from the int instruction. next_eip is the
1468 * EIP value AFTER the interrupt instruction. It is only relevant if
1471 static void QEMU_NORETURN
raise_interrupt(int intno
, int is_int
, int error_code
,
1472 int next_eip_addend
)
1475 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1476 intno
= check_exception(intno
, &error_code
);
1478 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
1481 env
->exception_index
= intno
;
1482 env
->error_code
= error_code
;
1483 env
->exception_is_int
= is_int
;
1484 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1488 /* shortcuts to generate exceptions */
1490 static void QEMU_NORETURN
raise_exception_err(int exception_index
,
1493 raise_interrupt(exception_index
, 0, error_code
, 0);
1496 void raise_exception_err_env(CPUState
*nenv
, int exception_index
,
1500 raise_interrupt(exception_index
, 0, error_code
, 0);
1503 static void QEMU_NORETURN
raise_exception(int exception_index
)
1505 raise_interrupt(exception_index
, 0, 0, 0);
1508 void raise_exception_env(int exception_index
, CPUState
*nenv
)
1511 raise_exception(exception_index
);
1515 #if defined(CONFIG_USER_ONLY)
1517 void do_smm_enter(CPUState
*env1
)
1521 void helper_rsm(void)
1527 #ifdef TARGET_X86_64
1528 #define SMM_REVISION_ID 0x00020064
1530 #define SMM_REVISION_ID 0x00020000
1533 void do_smm_enter(CPUState
*env1
)
1535 target_ulong sm_state
;
1538 CPUState
*saved_env
;
1543 qemu_log_mask(CPU_LOG_INT
, "SMM: enter\n");
1544 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1546 env
->hflags
|= HF_SMM_MASK
;
1547 cpu_smm_update(env
);
1549 sm_state
= env
->smbase
+ 0x8000;
1551 #ifdef TARGET_X86_64
1552 for(i
= 0; i
< 6; i
++) {
1554 offset
= 0x7e00 + i
* 16;
1555 stw_phys(sm_state
+ offset
, dt
->selector
);
1556 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1557 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1558 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1561 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1562 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1564 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1565 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1566 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1567 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1569 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1570 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1572 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1573 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1574 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1575 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1577 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1579 stq_phys(sm_state
+ 0x7ff8, EAX
);
1580 stq_phys(sm_state
+ 0x7ff0, ECX
);
1581 stq_phys(sm_state
+ 0x7fe8, EDX
);
1582 stq_phys(sm_state
+ 0x7fe0, EBX
);
1583 stq_phys(sm_state
+ 0x7fd8, ESP
);
1584 stq_phys(sm_state
+ 0x7fd0, EBP
);
1585 stq_phys(sm_state
+ 0x7fc8, ESI
);
1586 stq_phys(sm_state
+ 0x7fc0, EDI
);
1587 for(i
= 8; i
< 16; i
++)
1588 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1589 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1590 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1591 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1592 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1594 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1595 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1596 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1598 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1599 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1601 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1602 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1603 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1604 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1605 stl_phys(sm_state
+ 0x7fec, EDI
);
1606 stl_phys(sm_state
+ 0x7fe8, ESI
);
1607 stl_phys(sm_state
+ 0x7fe4, EBP
);
1608 stl_phys(sm_state
+ 0x7fe0, ESP
);
1609 stl_phys(sm_state
+ 0x7fdc, EBX
);
1610 stl_phys(sm_state
+ 0x7fd8, EDX
);
1611 stl_phys(sm_state
+ 0x7fd4, ECX
);
1612 stl_phys(sm_state
+ 0x7fd0, EAX
);
1613 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1614 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1616 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1617 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1618 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1619 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1621 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1622 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1623 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1624 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1626 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1627 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1629 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1630 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1632 for(i
= 0; i
< 6; i
++) {
1635 offset
= 0x7f84 + i
* 12;
1637 offset
= 0x7f2c + (i
- 3) * 12;
1638 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1639 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1640 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1641 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1643 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1645 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1646 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1648 /* init SMM cpu state */
1650 #ifdef TARGET_X86_64
1651 cpu_load_efer(env
, 0);
1653 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1654 env
->eip
= 0x00008000;
1655 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1657 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1658 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1659 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1660 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1661 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1663 cpu_x86_update_cr0(env
,
1664 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1665 cpu_x86_update_cr4(env
, 0);
1666 env
->dr
[7] = 0x00000400;
1667 CC_OP
= CC_OP_EFLAGS
;
1671 void helper_rsm(void)
1673 target_ulong sm_state
;
1677 sm_state
= env
->smbase
+ 0x8000;
1678 #ifdef TARGET_X86_64
1679 cpu_load_efer(env
, ldq_phys(sm_state
+ 0x7ed0));
1681 for(i
= 0; i
< 6; i
++) {
1682 offset
= 0x7e00 + i
* 16;
1683 cpu_x86_load_seg_cache(env
, i
,
1684 lduw_phys(sm_state
+ offset
),
1685 ldq_phys(sm_state
+ offset
+ 8),
1686 ldl_phys(sm_state
+ offset
+ 4),
1687 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1690 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1691 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1693 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1694 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1695 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1696 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1698 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1699 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1701 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1702 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1703 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1704 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1706 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1707 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1708 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1709 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1710 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1711 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1712 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1713 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1714 for(i
= 8; i
< 16; i
++)
1715 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1716 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1717 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1718 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1719 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1720 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1722 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1723 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1724 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1726 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1727 if (val
& 0x20000) {
1728 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1731 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1732 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1733 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1734 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1735 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1736 EDI
= ldl_phys(sm_state
+ 0x7fec);
1737 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1738 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1739 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1740 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1741 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1742 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1743 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1744 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1745 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1747 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1748 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1749 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1750 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1752 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1753 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1754 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1755 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1757 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1758 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1760 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1761 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1763 for(i
= 0; i
< 6; i
++) {
1765 offset
= 0x7f84 + i
* 12;
1767 offset
= 0x7f2c + (i
- 3) * 12;
1768 cpu_x86_load_seg_cache(env
, i
,
1769 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1770 ldl_phys(sm_state
+ offset
+ 8),
1771 ldl_phys(sm_state
+ offset
+ 4),
1772 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1774 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1776 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1777 if (val
& 0x20000) {
1778 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1781 CC_OP
= CC_OP_EFLAGS
;
1782 env
->hflags
&= ~HF_SMM_MASK
;
1783 cpu_smm_update(env
);
1785 qemu_log_mask(CPU_LOG_INT
, "SMM: after RSM\n");
1786 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1789 #endif /* !CONFIG_USER_ONLY */
1792 /* division, flags are undefined */
1794 void helper_divb_AL(target_ulong t0
)
1796 unsigned int num
, den
, q
, r
;
1798 num
= (EAX
& 0xffff);
1801 raise_exception(EXCP00_DIVZ
);
1805 raise_exception(EXCP00_DIVZ
);
1807 r
= (num
% den
) & 0xff;
1808 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1811 void helper_idivb_AL(target_ulong t0
)
1818 raise_exception(EXCP00_DIVZ
);
1822 raise_exception(EXCP00_DIVZ
);
1824 r
= (num
% den
) & 0xff;
1825 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1828 void helper_divw_AX(target_ulong t0
)
1830 unsigned int num
, den
, q
, r
;
1832 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1833 den
= (t0
& 0xffff);
1835 raise_exception(EXCP00_DIVZ
);
1839 raise_exception(EXCP00_DIVZ
);
1841 r
= (num
% den
) & 0xffff;
1842 EAX
= (EAX
& ~0xffff) | q
;
1843 EDX
= (EDX
& ~0xffff) | r
;
1846 void helper_idivw_AX(target_ulong t0
)
1850 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1853 raise_exception(EXCP00_DIVZ
);
1856 if (q
!= (int16_t)q
)
1857 raise_exception(EXCP00_DIVZ
);
1859 r
= (num
% den
) & 0xffff;
1860 EAX
= (EAX
& ~0xffff) | q
;
1861 EDX
= (EDX
& ~0xffff) | r
;
1864 void helper_divl_EAX(target_ulong t0
)
1866 unsigned int den
, r
;
1869 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1872 raise_exception(EXCP00_DIVZ
);
1877 raise_exception(EXCP00_DIVZ
);
1882 void helper_idivl_EAX(target_ulong t0
)
1887 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1890 raise_exception(EXCP00_DIVZ
);
1894 if (q
!= (int32_t)q
)
1895 raise_exception(EXCP00_DIVZ
);
1902 /* XXX: exception */
1903 void helper_aam(int base
)
1909 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1913 void helper_aad(int base
)
1917 ah
= (EAX
>> 8) & 0xff;
1918 al
= ((ah
* base
) + al
) & 0xff;
1919 EAX
= (EAX
& ~0xffff) | al
;
1923 void helper_aaa(void)
1929 eflags
= helper_cc_compute_all(CC_OP
);
1932 ah
= (EAX
>> 8) & 0xff;
1934 icarry
= (al
> 0xf9);
1935 if (((al
& 0x0f) > 9 ) || af
) {
1936 al
= (al
+ 6) & 0x0f;
1937 ah
= (ah
+ 1 + icarry
) & 0xff;
1938 eflags
|= CC_C
| CC_A
;
1940 eflags
&= ~(CC_C
| CC_A
);
1943 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1947 void helper_aas(void)
1953 eflags
= helper_cc_compute_all(CC_OP
);
1956 ah
= (EAX
>> 8) & 0xff;
1959 if (((al
& 0x0f) > 9 ) || af
) {
1960 al
= (al
- 6) & 0x0f;
1961 ah
= (ah
- 1 - icarry
) & 0xff;
1962 eflags
|= CC_C
| CC_A
;
1964 eflags
&= ~(CC_C
| CC_A
);
1967 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1971 void helper_daa(void)
1973 int old_al
, al
, af
, cf
;
1976 eflags
= helper_cc_compute_all(CC_OP
);
1979 old_al
= al
= EAX
& 0xff;
1982 if (((al
& 0x0f) > 9 ) || af
) {
1983 al
= (al
+ 6) & 0xff;
1986 if ((old_al
> 0x99) || cf
) {
1987 al
= (al
+ 0x60) & 0xff;
1990 EAX
= (EAX
& ~0xff) | al
;
1991 /* well, speed is not an issue here, so we compute the flags by hand */
1992 eflags
|= (al
== 0) << 6; /* zf */
1993 eflags
|= parity_table
[al
]; /* pf */
1994 eflags
|= (al
& 0x80); /* sf */
1998 void helper_das(void)
2000 int al
, al1
, af
, cf
;
2003 eflags
= helper_cc_compute_all(CC_OP
);
2010 if (((al
& 0x0f) > 9 ) || af
) {
2014 al
= (al
- 6) & 0xff;
2016 if ((al1
> 0x99) || cf
) {
2017 al
= (al
- 0x60) & 0xff;
2020 EAX
= (EAX
& ~0xff) | al
;
2021 /* well, speed is not an issue here, so we compute the flags by hand */
2022 eflags
|= (al
== 0) << 6; /* zf */
2023 eflags
|= parity_table
[al
]; /* pf */
2024 eflags
|= (al
& 0x80); /* sf */
2028 void helper_into(int next_eip_addend
)
2031 eflags
= helper_cc_compute_all(CC_OP
);
2032 if (eflags
& CC_O
) {
2033 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
2037 void helper_cmpxchg8b(target_ulong a0
)
2042 eflags
= helper_cc_compute_all(CC_OP
);
2044 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
2045 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
2048 /* always do the store */
2050 EDX
= (uint32_t)(d
>> 32);
2057 #ifdef TARGET_X86_64
2058 void helper_cmpxchg16b(target_ulong a0
)
2063 if ((a0
& 0xf) != 0)
2064 raise_exception(EXCP0D_GPF
);
2065 eflags
= helper_cc_compute_all(CC_OP
);
2068 if (d0
== EAX
&& d1
== EDX
) {
2073 /* always do the store */
2084 void helper_single_step(void)
2086 #ifndef CONFIG_USER_ONLY
2087 check_hw_breakpoints(env
, 1);
2088 env
->dr
[6] |= DR6_BS
;
2090 raise_exception(EXCP01_DB
);
2093 void helper_cpuid(void)
2095 uint32_t eax
, ebx
, ecx
, edx
;
2097 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
2099 cpu_x86_cpuid(env
, (uint32_t)EAX
, (uint32_t)ECX
, &eax
, &ebx
, &ecx
, &edx
);
2106 void helper_enter_level(int level
, int data32
, target_ulong t1
)
2109 uint32_t esp_mask
, esp
, ebp
;
2111 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2112 ssp
= env
->segs
[R_SS
].base
;
2121 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
2124 stl(ssp
+ (esp
& esp_mask
), t1
);
2131 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
2134 stw(ssp
+ (esp
& esp_mask
), t1
);
2138 #ifdef TARGET_X86_64
2139 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
2141 target_ulong esp
, ebp
;
2161 stw(esp
, lduw(ebp
));
2169 void helper_lldt(int selector
)
2173 int index
, entry_limit
;
2177 if ((selector
& 0xfffc) == 0) {
2178 /* XXX: NULL selector case: invalid LDT */
2183 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2185 index
= selector
& ~7;
2186 #ifdef TARGET_X86_64
2187 if (env
->hflags
& HF_LMA_MASK
)
2192 if ((index
+ entry_limit
) > dt
->limit
)
2193 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2194 ptr
= dt
->base
+ index
;
2195 e1
= ldl_kernel(ptr
);
2196 e2
= ldl_kernel(ptr
+ 4);
2197 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2198 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2199 if (!(e2
& DESC_P_MASK
))
2200 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2201 #ifdef TARGET_X86_64
2202 if (env
->hflags
& HF_LMA_MASK
) {
2204 e3
= ldl_kernel(ptr
+ 8);
2205 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2206 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2210 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2213 env
->ldt
.selector
= selector
;
2216 void helper_ltr(int selector
)
2220 int index
, type
, entry_limit
;
2224 if ((selector
& 0xfffc) == 0) {
2225 /* NULL selector case: invalid TR */
2231 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2233 index
= selector
& ~7;
2234 #ifdef TARGET_X86_64
2235 if (env
->hflags
& HF_LMA_MASK
)
2240 if ((index
+ entry_limit
) > dt
->limit
)
2241 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2242 ptr
= dt
->base
+ index
;
2243 e1
= ldl_kernel(ptr
);
2244 e2
= ldl_kernel(ptr
+ 4);
2245 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2246 if ((e2
& DESC_S_MASK
) ||
2247 (type
!= 1 && type
!= 9))
2248 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2249 if (!(e2
& DESC_P_MASK
))
2250 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2251 #ifdef TARGET_X86_64
2252 if (env
->hflags
& HF_LMA_MASK
) {
2254 e3
= ldl_kernel(ptr
+ 8);
2255 e4
= ldl_kernel(ptr
+ 12);
2256 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2257 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2258 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2259 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2263 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2265 e2
|= DESC_TSS_BUSY_MASK
;
2266 stl_kernel(ptr
+ 4, e2
);
2268 env
->tr
.selector
= selector
;
2271 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2272 void helper_load_seg(int seg_reg
, int selector
)
2281 cpl
= env
->hflags
& HF_CPL_MASK
;
2282 if ((selector
& 0xfffc) == 0) {
2283 /* null selector case */
2285 #ifdef TARGET_X86_64
2286 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2289 raise_exception_err(EXCP0D_GPF
, 0);
2290 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2297 index
= selector
& ~7;
2298 if ((index
+ 7) > dt
->limit
)
2299 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2300 ptr
= dt
->base
+ index
;
2301 e1
= ldl_kernel(ptr
);
2302 e2
= ldl_kernel(ptr
+ 4);
2304 if (!(e2
& DESC_S_MASK
))
2305 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2307 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2308 if (seg_reg
== R_SS
) {
2309 /* must be writable segment */
2310 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2311 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2312 if (rpl
!= cpl
|| dpl
!= cpl
)
2313 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2315 /* must be readable segment */
2316 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2317 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2319 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2320 /* if not conforming code, test rights */
2321 if (dpl
< cpl
|| dpl
< rpl
)
2322 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2326 if (!(e2
& DESC_P_MASK
)) {
2327 if (seg_reg
== R_SS
)
2328 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2330 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2333 /* set the access bit if not already set */
2334 if (!(e2
& DESC_A_MASK
)) {
2336 stl_kernel(ptr
+ 4, e2
);
2339 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2340 get_seg_base(e1
, e2
),
2341 get_seg_limit(e1
, e2
),
2344 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2345 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2350 /* protected mode jump */
2351 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2352 int next_eip_addend
)
2355 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2356 target_ulong next_eip
;
2358 if ((new_cs
& 0xfffc) == 0)
2359 raise_exception_err(EXCP0D_GPF
, 0);
2360 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2361 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2362 cpl
= env
->hflags
& HF_CPL_MASK
;
2363 if (e2
& DESC_S_MASK
) {
2364 if (!(e2
& DESC_CS_MASK
))
2365 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2366 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2367 if (e2
& DESC_C_MASK
) {
2368 /* conforming code segment */
2370 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2372 /* non conforming code segment */
2375 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2377 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2379 if (!(e2
& DESC_P_MASK
))
2380 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2381 limit
= get_seg_limit(e1
, e2
);
2382 if (new_eip
> limit
&&
2383 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2384 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2385 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2386 get_seg_base(e1
, e2
), limit
, e2
);
2389 /* jump to call or task gate */
2390 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2392 cpl
= env
->hflags
& HF_CPL_MASK
;
2393 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2395 case 1: /* 286 TSS */
2396 case 9: /* 386 TSS */
2397 case 5: /* task gate */
2398 if (dpl
< cpl
|| dpl
< rpl
)
2399 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2400 next_eip
= env
->eip
+ next_eip_addend
;
2401 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2402 CC_OP
= CC_OP_EFLAGS
;
2404 case 4: /* 286 call gate */
2405 case 12: /* 386 call gate */
2406 if ((dpl
< cpl
) || (dpl
< rpl
))
2407 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2408 if (!(e2
& DESC_P_MASK
))
2409 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2411 new_eip
= (e1
& 0xffff);
2413 new_eip
|= (e2
& 0xffff0000);
2414 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2415 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2416 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2417 /* must be code segment */
2418 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2419 (DESC_S_MASK
| DESC_CS_MASK
)))
2420 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2421 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2422 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2423 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2424 if (!(e2
& DESC_P_MASK
))
2425 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2426 limit
= get_seg_limit(e1
, e2
);
2427 if (new_eip
> limit
)
2428 raise_exception_err(EXCP0D_GPF
, 0);
2429 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2430 get_seg_base(e1
, e2
), limit
, e2
);
2434 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2440 /* real mode call */
2441 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2442 int shift
, int next_eip
)
2445 uint32_t esp
, esp_mask
;
2450 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2451 ssp
= env
->segs
[R_SS
].base
;
2453 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2454 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2456 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2457 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2460 SET_ESP(esp
, esp_mask
);
2462 env
->segs
[R_CS
].selector
= new_cs
;
2463 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2466 /* protected mode call */
2467 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2468 int shift
, int next_eip_addend
)
2471 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2472 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2473 uint32_t val
, limit
, old_sp_mask
;
2474 target_ulong ssp
, old_ssp
, next_eip
;
2476 next_eip
= env
->eip
+ next_eip_addend
;
2477 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2478 LOG_PCALL_STATE(env
);
2479 if ((new_cs
& 0xfffc) == 0)
2480 raise_exception_err(EXCP0D_GPF
, 0);
2481 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2482 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2483 cpl
= env
->hflags
& HF_CPL_MASK
;
2484 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2485 if (e2
& DESC_S_MASK
) {
2486 if (!(e2
& DESC_CS_MASK
))
2487 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2488 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2489 if (e2
& DESC_C_MASK
) {
2490 /* conforming code segment */
2492 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2494 /* non conforming code segment */
2497 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2499 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2501 if (!(e2
& DESC_P_MASK
))
2502 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2504 #ifdef TARGET_X86_64
2505 /* XXX: check 16/32 bit cases in long mode */
2510 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2511 PUSHQ(rsp
, next_eip
);
2512 /* from this point, not restartable */
2514 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2515 get_seg_base(e1
, e2
),
2516 get_seg_limit(e1
, e2
), e2
);
2522 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2523 ssp
= env
->segs
[R_SS
].base
;
2525 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2526 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2528 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2529 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2532 limit
= get_seg_limit(e1
, e2
);
2533 if (new_eip
> limit
)
2534 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2535 /* from this point, not restartable */
2536 SET_ESP(sp
, sp_mask
);
2537 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2538 get_seg_base(e1
, e2
), limit
, e2
);
2542 /* check gate type */
2543 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2544 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2547 case 1: /* available 286 TSS */
2548 case 9: /* available 386 TSS */
2549 case 5: /* task gate */
2550 if (dpl
< cpl
|| dpl
< rpl
)
2551 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2552 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2553 CC_OP
= CC_OP_EFLAGS
;
2555 case 4: /* 286 call gate */
2556 case 12: /* 386 call gate */
2559 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2564 if (dpl
< cpl
|| dpl
< rpl
)
2565 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2566 /* check valid bit */
2567 if (!(e2
& DESC_P_MASK
))
2568 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2569 selector
= e1
>> 16;
2570 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2571 param_count
= e2
& 0x1f;
2572 if ((selector
& 0xfffc) == 0)
2573 raise_exception_err(EXCP0D_GPF
, 0);
2575 if (load_segment(&e1
, &e2
, selector
) != 0)
2576 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2577 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2578 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2579 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2581 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2582 if (!(e2
& DESC_P_MASK
))
2583 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2585 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2586 /* to inner privilege */
2587 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2588 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2589 ss
, sp
, param_count
, ESP
);
2590 if ((ss
& 0xfffc) == 0)
2591 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2592 if ((ss
& 3) != dpl
)
2593 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2594 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2595 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2596 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2598 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2599 if (!(ss_e2
& DESC_S_MASK
) ||
2600 (ss_e2
& DESC_CS_MASK
) ||
2601 !(ss_e2
& DESC_W_MASK
))
2602 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2603 if (!(ss_e2
& DESC_P_MASK
))
2604 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2606 // push_size = ((param_count * 2) + 8) << shift;
2608 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2609 old_ssp
= env
->segs
[R_SS
].base
;
2611 sp_mask
= get_sp_mask(ss_e2
);
2612 ssp
= get_seg_base(ss_e1
, ss_e2
);
2614 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2615 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2616 for(i
= param_count
- 1; i
>= 0; i
--) {
2617 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2618 PUSHL(ssp
, sp
, sp_mask
, val
);
2621 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2622 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2623 for(i
= param_count
- 1; i
>= 0; i
--) {
2624 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2625 PUSHW(ssp
, sp
, sp_mask
, val
);
2630 /* to same privilege */
2632 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2633 ssp
= env
->segs
[R_SS
].base
;
2634 // push_size = (4 << shift);
2639 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2640 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2642 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2643 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2646 /* from this point, not restartable */
2649 ss
= (ss
& ~3) | dpl
;
2650 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2652 get_seg_limit(ss_e1
, ss_e2
),
2656 selector
= (selector
& ~3) | dpl
;
2657 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2658 get_seg_base(e1
, e2
),
2659 get_seg_limit(e1
, e2
),
2661 cpu_x86_set_cpl(env
, dpl
);
2662 SET_ESP(sp
, sp_mask
);
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
2725 /* protected mode iret */
2726 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2728 uint32_t new_cs
, new_eflags
, new_ss
;
2729 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2730 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2731 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2732 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2734 #ifdef TARGET_X86_64
2739 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2741 ssp
= env
->segs
[R_SS
].base
;
2742 new_eflags
= 0; /* avoid warning */
2743 #ifdef TARGET_X86_64
2749 POPQ(sp
, new_eflags
);
2755 POPL(ssp
, sp
, sp_mask
, new_eip
);
2756 POPL(ssp
, sp
, sp_mask
, new_cs
);
2759 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2760 if (new_eflags
& VM_MASK
)
2761 goto return_to_vm86
;
2765 POPW(ssp
, sp
, sp_mask
, new_eip
);
2766 POPW(ssp
, sp
, sp_mask
, new_cs
);
2768 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2770 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2771 new_cs
, new_eip
, shift
, addend
);
2772 LOG_PCALL_STATE(env
);
2773 if ((new_cs
& 0xfffc) == 0)
2774 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2775 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2776 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2777 if (!(e2
& DESC_S_MASK
) ||
2778 !(e2
& DESC_CS_MASK
))
2779 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2780 cpl
= env
->hflags
& HF_CPL_MASK
;
2783 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2784 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2785 if (e2
& DESC_C_MASK
) {
2787 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2790 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2792 if (!(e2
& DESC_P_MASK
))
2793 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2796 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2797 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2798 /* return to same privilege level */
2799 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2800 get_seg_base(e1
, e2
),
2801 get_seg_limit(e1
, e2
),
2804 /* return to different privilege level */
2805 #ifdef TARGET_X86_64
2814 POPL(ssp
, sp
, sp_mask
, new_esp
);
2815 POPL(ssp
, sp
, sp_mask
, new_ss
);
2819 POPW(ssp
, sp
, sp_mask
, new_esp
);
2820 POPW(ssp
, sp
, sp_mask
, new_ss
);
2822 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2824 if ((new_ss
& 0xfffc) == 0) {
2825 #ifdef TARGET_X86_64
2826 /* NULL ss is allowed in long mode if cpl != 3*/
2827 /* XXX: test CS64 ? */
2828 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2829 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2831 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2832 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2833 DESC_W_MASK
| DESC_A_MASK
);
2834 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2838 raise_exception_err(EXCP0D_GPF
, 0);
2841 if ((new_ss
& 3) != rpl
)
2842 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2843 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2844 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2845 if (!(ss_e2
& DESC_S_MASK
) ||
2846 (ss_e2
& DESC_CS_MASK
) ||
2847 !(ss_e2
& DESC_W_MASK
))
2848 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2849 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2851 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2852 if (!(ss_e2
& DESC_P_MASK
))
2853 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2854 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2855 get_seg_base(ss_e1
, ss_e2
),
2856 get_seg_limit(ss_e1
, ss_e2
),
2860 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2861 get_seg_base(e1
, e2
),
2862 get_seg_limit(e1
, e2
),
2864 cpu_x86_set_cpl(env
, rpl
);
2866 #ifdef TARGET_X86_64
2867 if (env
->hflags
& HF_CS64_MASK
)
2871 sp_mask
= get_sp_mask(ss_e2
);
2873 /* validate data segments */
2874 validate_seg(R_ES
, rpl
);
2875 validate_seg(R_DS
, rpl
);
2876 validate_seg(R_FS
, rpl
);
2877 validate_seg(R_GS
, rpl
);
2881 SET_ESP(sp
, sp_mask
);
2884 /* NOTE: 'cpl' is the _old_ CPL */
2885 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2887 eflags_mask
|= IOPL_MASK
;
2888 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2890 eflags_mask
|= IF_MASK
;
2892 eflags_mask
&= 0xffff;
2893 load_eflags(new_eflags
, eflags_mask
);
2898 POPL(ssp
, sp
, sp_mask
, new_esp
);
2899 POPL(ssp
, sp
, sp_mask
, new_ss
);
2900 POPL(ssp
, sp
, sp_mask
, new_es
);
2901 POPL(ssp
, sp
, sp_mask
, new_ds
);
2902 POPL(ssp
, sp
, sp_mask
, new_fs
);
2903 POPL(ssp
, sp
, sp_mask
, new_gs
);
2905 /* modify processor state */
2906 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2907 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2908 load_seg_vm(R_CS
, new_cs
& 0xffff);
2909 cpu_x86_set_cpl(env
, 3);
2910 load_seg_vm(R_SS
, new_ss
& 0xffff);
2911 load_seg_vm(R_ES
, new_es
& 0xffff);
2912 load_seg_vm(R_DS
, new_ds
& 0xffff);
2913 load_seg_vm(R_FS
, new_fs
& 0xffff);
2914 load_seg_vm(R_GS
, new_gs
& 0xffff);
2916 env
->eip
= new_eip
& 0xffff;
2920 void helper_iret_protected(int shift
, int next_eip
)
2922 int tss_selector
, type
;
2925 /* specific case for TSS */
2926 if (env
->eflags
& NT_MASK
) {
2927 #ifdef TARGET_X86_64
2928 if (env
->hflags
& HF_LMA_MASK
)
2929 raise_exception_err(EXCP0D_GPF
, 0);
2931 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2932 if (tss_selector
& 4)
2933 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2934 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2935 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2936 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2937 /* NOTE: we check both segment and busy TSS */
2939 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2940 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2942 helper_ret_protected(shift
, 1, 0);
2944 env
->hflags2
&= ~HF2_NMI_MASK
;
2947 void helper_lret_protected(int shift
, int addend
)
2949 helper_ret_protected(shift
, 0, addend
);
2952 void helper_sysenter(void)
2954 if (env
->sysenter_cs
== 0) {
2955 raise_exception_err(EXCP0D_GPF
, 0);
2957 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2958 cpu_x86_set_cpl(env
, 0);
2960 #ifdef TARGET_X86_64
2961 if (env
->hflags
& HF_LMA_MASK
) {
2962 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2964 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2966 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2970 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2972 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2974 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2976 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2978 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2980 DESC_W_MASK
| DESC_A_MASK
);
2981 ESP
= env
->sysenter_esp
;
2982 EIP
= env
->sysenter_eip
;
2985 void helper_sysexit(int dflag
)
2989 cpl
= env
->hflags
& HF_CPL_MASK
;
2990 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2991 raise_exception_err(EXCP0D_GPF
, 0);
2993 cpu_x86_set_cpl(env
, 3);
2994 #ifdef TARGET_X86_64
2996 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) | 3,
2998 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2999 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
3000 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
3001 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) | 3,
3003 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
3004 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
3005 DESC_W_MASK
| DESC_A_MASK
);
3009 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
3011 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
3012 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
3013 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
3014 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
3016 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
3017 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
3018 DESC_W_MASK
| DESC_A_MASK
);
3024 #if defined(CONFIG_USER_ONLY)
3025 target_ulong
helper_read_crN(int reg
)
3030 void helper_write_crN(int reg
, target_ulong t0
)
3034 void helper_movl_drN_T0(int reg
, target_ulong t0
)
3038 target_ulong
helper_read_crN(int reg
)
3042 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
3048 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
3049 val
= cpu_get_apic_tpr(env
->apic_state
);
3058 void helper_write_crN(int reg
, target_ulong t0
)
3060 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
3063 cpu_x86_update_cr0(env
, t0
);
3066 cpu_x86_update_cr3(env
, t0
);
3069 cpu_x86_update_cr4(env
, t0
);
3072 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
3073 cpu_set_apic_tpr(env
->apic_state
, t0
);
3075 env
->v_tpr
= t0
& 0x0f;
3083 void helper_movl_drN_T0(int reg
, target_ulong t0
)
3088 hw_breakpoint_remove(env
, reg
);
3090 hw_breakpoint_insert(env
, reg
);
3091 } else if (reg
== 7) {
3092 for (i
= 0; i
< 4; i
++)
3093 hw_breakpoint_remove(env
, i
);
3095 for (i
= 0; i
< 4; i
++)
3096 hw_breakpoint_insert(env
, i
);
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3153 #if defined(CONFIG_USER_ONLY)
3154 void helper_wrmsr(void)
3158 void helper_rdmsr(void)
3162 void helper_wrmsr(void)
3166 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3168 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3170 switch((uint32_t)ECX
) {
3171 case MSR_IA32_SYSENTER_CS
:
3172 env
->sysenter_cs
= val
& 0xffff;
3174 case MSR_IA32_SYSENTER_ESP
:
3175 env
->sysenter_esp
= val
;
3177 case MSR_IA32_SYSENTER_EIP
:
3178 env
->sysenter_eip
= val
;
3180 case MSR_IA32_APICBASE
:
3181 cpu_set_apic_base(env
->apic_state
, val
);
3185 uint64_t update_mask
;
3187 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3188 update_mask
|= MSR_EFER_SCE
;
3189 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3190 update_mask
|= MSR_EFER_LME
;
3191 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3192 update_mask
|= MSR_EFER_FFXSR
;
3193 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3194 update_mask
|= MSR_EFER_NXE
;
3195 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3196 update_mask
|= MSR_EFER_SVME
;
3197 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3198 update_mask
|= MSR_EFER_FFXSR
;
3199 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3200 (val
& update_mask
));
3209 case MSR_VM_HSAVE_PA
:
3210 env
->vm_hsave
= val
;
3212 #ifdef TARGET_X86_64
3223 env
->segs
[R_FS
].base
= val
;
3226 env
->segs
[R_GS
].base
= val
;
3228 case MSR_KERNELGSBASE
:
3229 env
->kernelgsbase
= val
;
3232 case MSR_MTRRphysBase(0):
3233 case MSR_MTRRphysBase(1):
3234 case MSR_MTRRphysBase(2):
3235 case MSR_MTRRphysBase(3):
3236 case MSR_MTRRphysBase(4):
3237 case MSR_MTRRphysBase(5):
3238 case MSR_MTRRphysBase(6):
3239 case MSR_MTRRphysBase(7):
3240 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3242 case MSR_MTRRphysMask(0):
3243 case MSR_MTRRphysMask(1):
3244 case MSR_MTRRphysMask(2):
3245 case MSR_MTRRphysMask(3):
3246 case MSR_MTRRphysMask(4):
3247 case MSR_MTRRphysMask(5):
3248 case MSR_MTRRphysMask(6):
3249 case MSR_MTRRphysMask(7):
3250 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3252 case MSR_MTRRfix64K_00000
:
3253 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3255 case MSR_MTRRfix16K_80000
:
3256 case MSR_MTRRfix16K_A0000
:
3257 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3259 case MSR_MTRRfix4K_C0000
:
3260 case MSR_MTRRfix4K_C8000
:
3261 case MSR_MTRRfix4K_D0000
:
3262 case MSR_MTRRfix4K_D8000
:
3263 case MSR_MTRRfix4K_E0000
:
3264 case MSR_MTRRfix4K_E8000
:
3265 case MSR_MTRRfix4K_F0000
:
3266 case MSR_MTRRfix4K_F8000
:
3267 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3269 case MSR_MTRRdefType
:
3270 env
->mtrr_deftype
= val
;
3272 case MSR_MCG_STATUS
:
3273 env
->mcg_status
= val
;
3276 if ((env
->mcg_cap
& MCG_CTL_P
)
3277 && (val
== 0 || val
== ~(uint64_t)0))
3283 case MSR_IA32_MISC_ENABLE
:
3284 env
->msr_ia32_misc_enable
= val
;
3287 if ((uint32_t)ECX
>= MSR_MC0_CTL
3288 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3289 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3290 if ((offset
& 0x3) != 0
3291 || (val
== 0 || val
== ~(uint64_t)0))
3292 env
->mce_banks
[offset
] = val
;
3295 /* XXX: exception ? */
3300 void helper_rdmsr(void)
3304 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3306 switch((uint32_t)ECX
) {
3307 case MSR_IA32_SYSENTER_CS
:
3308 val
= env
->sysenter_cs
;
3310 case MSR_IA32_SYSENTER_ESP
:
3311 val
= env
->sysenter_esp
;
3313 case MSR_IA32_SYSENTER_EIP
:
3314 val
= env
->sysenter_eip
;
3316 case MSR_IA32_APICBASE
:
3317 val
= cpu_get_apic_base(env
->apic_state
);
3328 case MSR_VM_HSAVE_PA
:
3329 val
= env
->vm_hsave
;
3331 case MSR_IA32_PERF_STATUS
:
3332 /* tsc_increment_by_tick */
3334 /* CPU multiplier */
3335 val
|= (((uint64_t)4ULL) << 40);
3337 #ifdef TARGET_X86_64
3348 val
= env
->segs
[R_FS
].base
;
3351 val
= env
->segs
[R_GS
].base
;
3353 case MSR_KERNELGSBASE
:
3354 val
= env
->kernelgsbase
;
3360 case MSR_MTRRphysBase(0):
3361 case MSR_MTRRphysBase(1):
3362 case MSR_MTRRphysBase(2):
3363 case MSR_MTRRphysBase(3):
3364 case MSR_MTRRphysBase(4):
3365 case MSR_MTRRphysBase(5):
3366 case MSR_MTRRphysBase(6):
3367 case MSR_MTRRphysBase(7):
3368 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3370 case MSR_MTRRphysMask(0):
3371 case MSR_MTRRphysMask(1):
3372 case MSR_MTRRphysMask(2):
3373 case MSR_MTRRphysMask(3):
3374 case MSR_MTRRphysMask(4):
3375 case MSR_MTRRphysMask(5):
3376 case MSR_MTRRphysMask(6):
3377 case MSR_MTRRphysMask(7):
3378 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3380 case MSR_MTRRfix64K_00000
:
3381 val
= env
->mtrr_fixed
[0];
3383 case MSR_MTRRfix16K_80000
:
3384 case MSR_MTRRfix16K_A0000
:
3385 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3387 case MSR_MTRRfix4K_C0000
:
3388 case MSR_MTRRfix4K_C8000
:
3389 case MSR_MTRRfix4K_D0000
:
3390 case MSR_MTRRfix4K_D8000
:
3391 case MSR_MTRRfix4K_E0000
:
3392 case MSR_MTRRfix4K_E8000
:
3393 case MSR_MTRRfix4K_F0000
:
3394 case MSR_MTRRfix4K_F8000
:
3395 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3397 case MSR_MTRRdefType
:
3398 val
= env
->mtrr_deftype
;
3401 if (env
->cpuid_features
& CPUID_MTRR
)
3402 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3404 /* XXX: exception ? */
3411 if (env
->mcg_cap
& MCG_CTL_P
)
3416 case MSR_MCG_STATUS
:
3417 val
= env
->mcg_status
;
3419 case MSR_IA32_MISC_ENABLE
:
3420 val
= env
->msr_ia32_misc_enable
;
3423 if ((uint32_t)ECX
>= MSR_MC0_CTL
3424 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3425 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3426 val
= env
->mce_banks
[offset
];
3429 /* XXX: exception ? */
3433 EAX
= (uint32_t)(val
);
3434 EDX
= (uint32_t)(val
>> 32);
3438 target_ulong
helper_lsl(target_ulong selector1
)
3441 uint32_t e1
, e2
, eflags
, selector
;
3442 int rpl
, dpl
, cpl
, type
;
3444 selector
= selector1
& 0xffff;
3445 eflags
= helper_cc_compute_all(CC_OP
);
3446 if ((selector
& 0xfffc) == 0)
3448 if (load_segment(&e1
, &e2
, selector
) != 0)
3451 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3452 cpl
= env
->hflags
& HF_CPL_MASK
;
3453 if (e2
& DESC_S_MASK
) {
3454 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3457 if (dpl
< cpl
|| dpl
< rpl
)
3461 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3472 if (dpl
< cpl
|| dpl
< rpl
) {
3474 CC_SRC
= eflags
& ~CC_Z
;
3478 limit
= get_seg_limit(e1
, e2
);
3479 CC_SRC
= eflags
| CC_Z
;
3483 target_ulong
helper_lar(target_ulong selector1
)
3485 uint32_t e1
, e2
, eflags
, selector
;
3486 int rpl
, dpl
, cpl
, type
;
3488 selector
= selector1
& 0xffff;
3489 eflags
= helper_cc_compute_all(CC_OP
);
3490 if ((selector
& 0xfffc) == 0)
3492 if (load_segment(&e1
, &e2
, selector
) != 0)
3495 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3496 cpl
= env
->hflags
& HF_CPL_MASK
;
3497 if (e2
& DESC_S_MASK
) {
3498 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3501 if (dpl
< cpl
|| dpl
< rpl
)
3505 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3519 if (dpl
< cpl
|| dpl
< rpl
) {
3521 CC_SRC
= eflags
& ~CC_Z
;
3525 CC_SRC
= eflags
| CC_Z
;
3526 return e2
& 0x00f0ff00;
3529 void helper_verr(target_ulong selector1
)
3531 uint32_t e1
, e2
, eflags
, selector
;
3534 selector
= selector1
& 0xffff;
3535 eflags
= helper_cc_compute_all(CC_OP
);
3536 if ((selector
& 0xfffc) == 0)
3538 if (load_segment(&e1
, &e2
, selector
) != 0)
3540 if (!(e2
& DESC_S_MASK
))
3543 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3544 cpl
= env
->hflags
& HF_CPL_MASK
;
3545 if (e2
& DESC_CS_MASK
) {
3546 if (!(e2
& DESC_R_MASK
))
3548 if (!(e2
& DESC_C_MASK
)) {
3549 if (dpl
< cpl
|| dpl
< rpl
)
3553 if (dpl
< cpl
|| dpl
< rpl
) {
3555 CC_SRC
= eflags
& ~CC_Z
;
3559 CC_SRC
= eflags
| CC_Z
;
3562 void helper_verw(target_ulong selector1
)
3564 uint32_t e1
, e2
, eflags
, selector
;
3567 selector
= selector1
& 0xffff;
3568 eflags
= helper_cc_compute_all(CC_OP
);
3569 if ((selector
& 0xfffc) == 0)
3571 if (load_segment(&e1
, &e2
, selector
) != 0)
3573 if (!(e2
& DESC_S_MASK
))
3576 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3577 cpl
= env
->hflags
& HF_CPL_MASK
;
3578 if (e2
& DESC_CS_MASK
) {
3581 if (dpl
< cpl
|| dpl
< rpl
)
3583 if (!(e2
& DESC_W_MASK
)) {
3585 CC_SRC
= eflags
& ~CC_Z
;
3589 CC_SRC
= eflags
| CC_Z
;
/* x87 FPU helpers */

static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
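/* Minimal sketch (hypothetical helper, not in the original source): the
   conversions above rely on a union to reinterpret the softfloat float64
   bit pattern as a host 'double'.  The same type-punning idiom, written
   with a plain uint64_t so the trick is visible in isolation: */
static inline double example_bits_to_host_double(uint64_t bits)
{
    union {
        uint64_t i;
        double d;
    } u;

    u.i = bits;   /* store the raw IEEE-754 bit pattern */
    return u.d;   /* read it back as a host double */
}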
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;

    u.i = val;
    FT0 = float32_to_floatx80(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;

    u.i = val;
    FT0 = float64_to_floatx80(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx80(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;

    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;

    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;

    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;

    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
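/* Sketch (hypothetical helper): the loads above push onto the 8-entry
   circular x87 register stack by decrementing the top-of-stack index
   modulo 8, then tagging the new slot as valid. */
static inline int example_x87_push_index(int fpstt)
{
    return (fpstt - 1) & 7;   /* wraps from 0 back to 7 */
}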
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;

    u.f = floatx80_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;

    u.f = floatx80_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;

    val = floatx80_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;

    val = floatx80_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;

    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;

    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;

    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;

    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
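/* Sketch (hypothetical helper): the 16-bit integer stores above return
   the x87 "integer indefinite" value 0x8000 when the rounded result does
   not fit in an int16_t, as shown here in isolation: */
static inline int32_t example_fist16_saturate(int32_t rounded)
{
    if (rounded != (int16_t)rounded)
        return -32768;   /* 0x8000, the indefinite integer */
    return rounded;
}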
void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
3823 void helper_fmov_ST0_FT0(void)
3828 void helper_fmov_FT0_STN(int st_index
)
3833 void helper_fmov_ST0_STN(int st_index
)
3838 void helper_fmov_STN_ST0(int st_index
)
3843 void helper_fxchg_ST0_STN(int st_index
)
/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;

    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;

    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}
void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}

void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}

void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}

void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}

void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}

void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}

void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}

void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
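/* Sketch (hypothetical helper, not in the original file): helper_fnstsw()
   above merges the 3-bit top-of-stack pointer into bits 13..11 of the
   status word.  A guest-side decode of that value would look like this: */
static inline int example_fpu_top_from_status(uint32_t status_word)
{
    return (status_word >> 11) & 7;   /* extract the TOP field */
}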
4022 static void update_fp_status(void)
4026 /* set rounding mode */
4027 switch(env
->fpuc
& RC_MASK
) {
4030 rnd_type
= float_round_nearest_even
;
4033 rnd_type
= float_round_down
;
4036 rnd_type
= float_round_up
;
4039 rnd_type
= float_round_to_zero
;
4042 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
4043 switch((env
->fpuc
>> 8) & 3) {
4055 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
4058 void helper_fldcw(uint32_t val
)
4064 void helper_fclex(void)
4066 env
->fpus
&= 0x7f00;
4069 void helper_fwait(void)
4071 if (env
->fpus
& FPUS_SE
)
4072 fpu_raise_exception();
4075 void helper_fninit(void)
4092 void helper_fbld_ST0(target_ulong ptr
)
4100 for(i
= 8; i
>= 0; i
--) {
4102 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
4104 tmp
= int64_to_floatx80(val
, &env
->fp_status
);
4105 if (ldub(ptr
+ 9) & 0x80) {
4112 void helper_fbst_ST0(target_ulong ptr
)
4115 target_ulong mem_ref
, mem_end
;
4118 val
= floatx80_to_int64(ST0
, &env
->fp_status
);
4120 mem_end
= mem_ref
+ 9;
4127 while (mem_ref
< mem_end
) {
4132 v
= ((v
/ 10) << 4) | (v
% 10);
4135 while (mem_ref
< mem_end
) {
4140 void helper_f2xm1(void)
4142 double val
= floatx80_to_double(ST0
);
4143 val
= pow(2.0, val
) - 1.0;
4144 ST0
= double_to_floatx80(val
);
4147 void helper_fyl2x(void)
4149 double fptemp
= floatx80_to_double(ST0
);
4152 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
4153 fptemp
*= floatx80_to_double(ST1
);
4154 ST1
= double_to_floatx80(fptemp
);
4157 env
->fpus
&= (~0x4700);
4162 void helper_fptan(void)
4164 double fptemp
= floatx80_to_double(ST0
);
4166 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4169 fptemp
= tan(fptemp
);
4170 ST0
= double_to_floatx80(fptemp
);
4173 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4174 /* the above code is for |arg| < 2**52 only */
4178 void helper_fpatan(void)
4180 double fptemp
, fpsrcop
;
4182 fpsrcop
= floatx80_to_double(ST1
);
4183 fptemp
= floatx80_to_double(ST0
);
4184 ST1
= double_to_floatx80(atan2(fpsrcop
, fptemp
));
4188 void helper_fxtract(void)
4194 if (floatx80_is_zero(ST0
)) {
4195 /* Easy way to generate -inf and raising division by 0 exception */
4196 ST0
= floatx80_div(floatx80_chs(floatx80_one
), floatx80_zero
, &env
->fp_status
);
4202 expdif
= EXPD(temp
) - EXPBIAS
;
4203 /*DP exponent bias*/
4204 ST0
= int32_to_floatx80(expdif
, &env
->fp_status
);
4211 void helper_fprem1(void)
4213 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4214 CPU_LDoubleU fpsrcop1
, fptemp1
;
4216 signed long long int q
;
4218 st0
= floatx80_to_double(ST0
);
4219 st1
= floatx80_to_double(ST1
);
4221 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4222 ST0
= double_to_floatx80(0.0 / 0.0); /* NaN */
4223 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4231 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4234 /* optimisation? taken from the AMD docs */
4235 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4236 /* ST0 is unchanged */
4241 dblq
= fpsrcop
/ fptemp
;
4242 /* round dblq towards nearest integer */
4244 st0
= fpsrcop
- fptemp
* dblq
;
4246 /* convert dblq to q by truncating towards zero */
4248 q
= (signed long long int)(-dblq
);
4250 q
= (signed long long int)dblq
;
4252 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4253 /* (C0,C3,C1) <-- (q2,q1,q0) */
4254 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4255 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4256 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4258 env
->fpus
|= 0x400; /* C2 <-- 1 */
4259 fptemp
= pow(2.0, expdif
- 50);
4260 fpsrcop
= (st0
/ st1
) / fptemp
;
4261 /* fpsrcop = integer obtained by chopping */
4262 fpsrcop
= (fpsrcop
< 0.0) ?
4263 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4264 st0
-= (st1
* fpsrcop
* fptemp
);
4266 ST0
= double_to_floatx80(st0
);
4269 void helper_fprem(void)
4271 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4272 CPU_LDoubleU fpsrcop1
, fptemp1
;
4274 signed long long int q
;
4276 st0
= floatx80_to_double(ST0
);
4277 st1
= floatx80_to_double(ST1
);
4279 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4280 ST0
= double_to_floatx80(0.0 / 0.0); /* NaN */
4281 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4289 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4292 /* optimisation? taken from the AMD docs */
4293 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4294 /* ST0 is unchanged */
4298 if ( expdif
< 53 ) {
4299 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4300 /* round dblq towards zero */
4301 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4302 st0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4304 /* convert dblq to q by truncating towards zero */
4306 q
= (signed long long int)(-dblq
);
4308 q
= (signed long long int)dblq
;
4310 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4311 /* (C0,C3,C1) <-- (q2,q1,q0) */
4312 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4313 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4314 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4316 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4317 env
->fpus
|= 0x400; /* C2 <-- 1 */
4318 fptemp
= pow(2.0, (double)(expdif
- N
));
4319 fpsrcop
= (st0
/ st1
) / fptemp
;
4320 /* fpsrcop = integer obtained by chopping */
4321 fpsrcop
= (fpsrcop
< 0.0) ?
4322 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4323 st0
-= (st1
* fpsrcop
* fptemp
);
4325 ST0
= double_to_floatx80(st0
);
4328 void helper_fyl2xp1(void)
4330 double fptemp
= floatx80_to_double(ST0
);
4332 if ((fptemp
+1.0)>0.0) {
4333 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4334 fptemp
*= floatx80_to_double(ST1
);
4335 ST1
= double_to_floatx80(fptemp
);
4338 env
->fpus
&= (~0x4700);
4343 void helper_fsqrt(void)
4345 if (floatx80_is_neg(ST0
)) {
4346 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4349 ST0
= floatx80_sqrt(ST0
, &env
->fp_status
);
4352 void helper_fsincos(void)
4354 double fptemp
= floatx80_to_double(ST0
);
4356 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4359 ST0
= double_to_floatx80(sin(fptemp
));
4361 ST0
= double_to_floatx80(cos(fptemp
));
4362 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4363 /* the above code is for |arg| < 2**63 only */
4367 void helper_frndint(void)
4369 ST0
= floatx80_round_to_int(ST0
, &env
->fp_status
);
4372 void helper_fscale(void)
4374 if (floatx80_is_any_nan(ST1
)) {
4377 int n
= floatx80_to_int32_round_to_zero(ST1
, &env
->fp_status
);
4378 ST0
= floatx80_scalbn(ST0
, n
, &env
->fp_status
);
void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
4408 void helper_fxam_ST0(void)
4415 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4417 env
->fpus
|= 0x200; /* C1 <-- 1 */
4419 /* XXX: test fptags too */
4420 expdif
= EXPD(temp
);
4421 if (expdif
== MAXEXPD
) {
4422 if (MANTD(temp
) == 0x8000000000000000ULL
)
4423 env
->fpus
|= 0x500 /*Infinity*/;
4425 env
->fpus
|= 0x100 /*NaN*/;
4426 } else if (expdif
== 0) {
4427 if (MANTD(temp
) == 0)
4428 env
->fpus
|= 0x4000 /*Zero*/;
4430 env
->fpus
|= 0x4400 /*Denormal*/;
4436 void helper_fstenv(target_ulong ptr
, int data32
)
4438 int fpus
, fptag
, exp
, i
;
4442 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4444 for (i
=7; i
>=0; i
--) {
4446 if (env
->fptags
[i
]) {
4449 tmp
.d
= env
->fpregs
[i
].d
;
4452 if (exp
== 0 && mant
== 0) {
4455 } else if (exp
== 0 || exp
== MAXEXPD
4456 || (mant
& (1LL << 63)) == 0
4458 /* NaNs, infinity, denormal */
4465 stl(ptr
, env
->fpuc
);
4467 stl(ptr
+ 8, fptag
);
4468 stl(ptr
+ 12, 0); /* fpip */
4469 stl(ptr
+ 16, 0); /* fpcs */
4470 stl(ptr
+ 20, 0); /* fpoo */
4471 stl(ptr
+ 24, 0); /* fpos */
4474 stw(ptr
, env
->fpuc
);
4476 stw(ptr
+ 4, fptag
);
4484 void helper_fldenv(target_ulong ptr
, int data32
)
4489 env
->fpuc
= lduw(ptr
);
4490 fpus
= lduw(ptr
+ 4);
4491 fptag
= lduw(ptr
+ 8);
4494 env
->fpuc
= lduw(ptr
);
4495 fpus
= lduw(ptr
+ 2);
4496 fptag
= lduw(ptr
+ 4);
4498 env
->fpstt
= (fpus
>> 11) & 7;
4499 env
->fpus
= fpus
& ~0x3800;
4500 for(i
= 0;i
< 8; i
++) {
4501 env
->fptags
[i
] = ((fptag
& 3) == 3);
4506 void helper_fsave(target_ulong ptr
, int data32
)
4511 helper_fstenv(ptr
, data32
);
4513 ptr
+= (14 << data32
);
4514 for(i
= 0;i
< 8; i
++) {
4516 helper_fstt(tmp
, ptr
);
4534 void helper_frstor(target_ulong ptr
, int data32
)
4539 helper_fldenv(ptr
, data32
);
4540 ptr
+= (14 << data32
);
4542 for(i
= 0;i
< 8; i
++) {
4543 tmp
= helper_fldt(ptr
);
4550 #if defined(CONFIG_USER_ONLY)
4551 void cpu_x86_load_seg(CPUX86State
*s
, int seg_reg
, int selector
)
4553 CPUX86State
*saved_env
;
4557 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
4559 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
4560 (selector
<< 4), 0xffff, 0);
4562 helper_load_seg(seg_reg
, selector
);
4567 void cpu_x86_fsave(CPUX86State
*s
, target_ulong ptr
, int data32
)
4569 CPUX86State
*saved_env
;
4574 helper_fsave(ptr
, data32
);
4579 void cpu_x86_frstor(CPUX86State
*s
, target_ulong ptr
, int data32
)
4581 CPUX86State
*saved_env
;
4586 helper_frstor(ptr
, data32
);
4592 void helper_fxsave(target_ulong ptr
, int data64
)
4594 int fpus
, fptag
, i
, nb_xmm_regs
;
4598 /* The operand must be 16 byte aligned */
4600 raise_exception(EXCP0D_GPF
);
4603 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4605 for(i
= 0; i
< 8; i
++) {
4606 fptag
|= (env
->fptags
[i
] << i
);
4608 stw(ptr
, env
->fpuc
);
4610 stw(ptr
+ 4, fptag
^ 0xff);
4611 #ifdef TARGET_X86_64
4613 stq(ptr
+ 0x08, 0); /* rip */
4614 stq(ptr
+ 0x10, 0); /* rdp */
4618 stl(ptr
+ 0x08, 0); /* eip */
4619 stl(ptr
+ 0x0c, 0); /* sel */
4620 stl(ptr
+ 0x10, 0); /* dp */
4621 stl(ptr
+ 0x14, 0); /* sel */
4625 for(i
= 0;i
< 8; i
++) {
4627 helper_fstt(tmp
, addr
);
4631 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4632 /* XXX: finish it */
4633 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4634 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4635 if (env
->hflags
& HF_CS64_MASK
)
4640 /* Fast FXSAVE leaves out the XMM registers */
4641 if (!(env
->efer
& MSR_EFER_FFXSR
)
4642 || (env
->hflags
& HF_CPL_MASK
)
4643 || !(env
->hflags
& HF_LMA_MASK
)) {
4644 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4645 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4646 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4653 void helper_fxrstor(target_ulong ptr
, int data64
)
4655 int i
, fpus
, fptag
, nb_xmm_regs
;
4659 /* The operand must be 16 byte aligned */
4661 raise_exception(EXCP0D_GPF
);
4664 env
->fpuc
= lduw(ptr
);
4665 fpus
= lduw(ptr
+ 2);
4666 fptag
= lduw(ptr
+ 4);
4667 env
->fpstt
= (fpus
>> 11) & 7;
4668 env
->fpus
= fpus
& ~0x3800;
4670 for(i
= 0;i
< 8; i
++) {
4671 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4675 for(i
= 0;i
< 8; i
++) {
4676 tmp
= helper_fldt(addr
);
4681 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4682 /* XXX: finish it */
4683 env
->mxcsr
= ldl(ptr
+ 0x18);
4685 if (env
->hflags
& HF_CS64_MASK
)
4690 /* Fast FXRESTORE leaves out the XMM registers */
4691 if (!(env
->efer
& MSR_EFER_FFXSR
)
4692 || (env
->hflags
& HF_CPL_MASK
)
4693 || !(env
->hflags
& HF_LMA_MASK
)) {
4694 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4695 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4696 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
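/* Usage sketch (hypothetical helper): cpu_get_fp80()/cpu_set_fp80() above
   split an 80-bit register into its 64-bit mantissa and 16-bit
   sign+exponent halves.  A round trip through both should reproduce the
   original value bit for bit: */
static inline floatx80 example_fp80_roundtrip(floatx80 f)
{
    uint64_t mant;
    uint16_t exp;

    cpu_get_fp80(&mant, &exp, f);      /* decompose */
    return cpu_set_fp80(mant, exp);    /* recompose */
}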
4721 #ifdef TARGET_X86_64
4723 //#define DEBUG_MULDIV
4725 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4734 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4738 add128(plow
, phigh
, 1, 0);
4741 /* return TRUE if overflow */
4742 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4744 uint64_t q
, r
, a1
, a0
;
4757 /* XXX: use a better algorithm */
4758 for(i
= 0; i
< 64; i
++) {
4760 a1
= (a1
<< 1) | (a0
>> 63);
4761 if (ab
|| a1
>= b
) {
4767 a0
= (a0
<< 1) | qb
;
4769 #if defined(DEBUG_MULDIV)
4770 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4771 *phigh
, *plow
, b
, a0
, a1
);
4779 /* return TRUE if overflow */
4780 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4783 sa
= ((int64_t)*phigh
< 0);
4785 neg128(plow
, phigh
);
4789 if (div64(plow
, phigh
, b
) != 0)
4792 if (*plow
> (1ULL << 63))
4796 if (*plow
>= (1ULL << 63))
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
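/* Usage sketch (hypothetical helper, for illustration only):
   helper_divq_EAX() above feeds the 128-bit dividend EDX:EAX to div64(),
   which leaves the quotient in the low half and the remainder in the
   high half.  The same call on a constant dividend (2^64 + 10) / 7: */
static inline int example_div64_usage(uint64_t *quotient, uint64_t *remainder)
{
    uint64_t low = 10, high = 1;        /* dividend = 2^64 + 10 */
    int overflow = div64(&low, &high, 7);

    *quotient = low;                    /* quotient replaces the low half */
    *remainder = high;                  /* remainder replaces the high half */
    return overflow;                    /* non-zero if the quotient overflowed */
}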
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
4937 /* vm86plus instructions */
4938 void helper_cli_vm(void)
4940 env
->eflags
&= ~VIF_MASK
;
4943 void helper_sti_vm(void)
4945 env
->eflags
|= VIF_MASK
;
4946 if (env
->eflags
& VIP_MASK
) {
4947 raise_exception(EXCP0D_GPF
);
4952 void helper_set_inhibit_irq(void)
4954 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4957 void helper_reset_inhibit_irq(void)
4959 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}
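/* Sketch (hypothetical helper): BOUND checks a signed index against a
   pair of bounds stored back to back in memory; for the 16-bit form used
   by helper_boundw() that is two int16_t words at a0 and a0+2.  An
   equivalent host-side check: */
static inline int example_bound_check16(int16_t v, int16_t low, int16_t high)
{
    return (v < low || v > high);   /* non-zero => #BR exception */
}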
4983 #if !defined(CONFIG_USER_ONLY)
4985 #define MMUSUFFIX _mmu
4988 #include "softmmu_template.h"
4991 #include "softmmu_template.h"
4994 #include "softmmu_template.h"
4997 #include "softmmu_template.h"
5001 #if !defined(CONFIG_USER_ONLY)
5002 /* try to fill the TLB and return an exception if error. If retaddr is
5003 NULL, it means that the function was called in C code (i.e. not
5004 from generated code or from helper.c) */
5005 /* XXX: fix it to restore all registers */
5006 void tlb_fill(CPUState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
5009 TranslationBlock
*tb
;
5012 CPUX86State
*saved_env
;
5017 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
5020 /* now we have a real cpu fault */
5021 pc
= (unsigned long)retaddr
;
5022 tb
= tb_find_pc(pc
);
5024 /* the PC is inside the translated code. It means that we have
5025 a virtual CPU fault */
5026 cpu_restore_state(tb
, env
, pc
);
5029 raise_exception_err(env
->exception_index
, env
->error_code
);
5035 /* Secure Virtual Machine helpers */
5037 #if defined(CONFIG_USER_ONLY)
5039 void helper_vmrun(int aflag
, int next_eip_addend
)
5042 void helper_vmmcall(void)
5045 void helper_vmload(int aflag
)
5048 void helper_vmsave(int aflag
)
5051 void helper_stgi(void)
5054 void helper_clgi(void)
5057 void helper_skinit(void)
5060 void helper_invlpga(int aflag
)
5063 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5066 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5070 void svm_check_intercept(CPUState
*env1
, uint32_t type
)
5074 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5075 uint32_t next_eip_addend
)
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
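/* Sketch (hypothetical helpers, not part of the original file): the VMCB
   stores segment attributes in a packed 12-bit 'attrib' field, while the
   CPU state keeps them in the descriptor 'flags' layout.  The two
   conversions used by svm_save_seg()/svm_load_seg() above are inverses: */
static inline uint16_t example_flags_to_attrib(uint32_t flags)
{
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static inline uint32_t example_attrib_to_flags(uint16_t attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}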
5113 void helper_vmrun(int aflag
, int next_eip_addend
)
5119 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
5124 addr
= (uint32_t)EAX
;
5126 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
5128 env
->vm_vmcb
= addr
;
5130 /* save the current CPU state in the hsave page */
5131 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5132 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5134 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5135 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5137 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5138 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5139 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5140 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5141 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5142 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5144 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5145 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5147 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5149 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5151 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5153 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5156 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
5157 EIP
+ next_eip_addend
);
5158 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5159 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt_all(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n",
                      env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

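/* VMLOAD and VMSAVE transfer the hidden processor state (FS, GS, TR, LDTR,
   KernelGSBase and the SYSCALL/SYSENTER MSRs) that VMRUN and #VMEXIT do not
   switch. */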
void helper_vmload(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

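/* STGI and CLGI set and clear the global interrupt flag (GIF); while GIF is
   clear, interrupts directed at the host are held pending. */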
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

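/* Check whether the given intercept is active for the current guest; if it
   is, raise a #VMEXIT with the given exit code and parameter. */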
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void svm_check_intercept(CPUState *env1, uint32_t type)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

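/* #VMEXIT: write the guest state and the exit information back into the
   VMCB, then reload the host state that VMRUN saved in the hsave page. */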
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;
}

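/* MMX/SSE */

/* The MMX registers alias the x87 register stack, so entering MMX mode marks
   all eight fptags entries as valid, while EMMS marks them empty again. */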
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

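/* The MMX/SSE helpers are generated by including ops_sse.h once per operand
   width, and the generic ALU/shift helpers by including helper_template.h
   once per data size; the SHIFT define selects the size for each pass. */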
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64
#define SHIFT 3
#include "helper_template.h"
#undef SHIFT
#endif

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

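/* QEMU evaluates eflags lazily: CC_SRC/CC_DST hold the operands of the last
   flag-setting operation and CC_OP records which operation it was, so the
   helpers below reconstruct the flags only when they are actually needed. */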
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t cpu_cc_compute_all(CPUState *env1, int op)
{
    CPUState *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();