/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "dyngen-exec.h"
#include "host-utils.h"
#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
{
    if (n >= 0) {
        return x << n;
    } else {
        return x >> (-n);
    }
}
#define MAXTAN 9223372036854775808.0

/* the following deal with x86 long double-precision numbers */
#define MAXEXPD 0x7fff
#define EXPBIAS 16383
#define EXPD(fp)        (fp.l.upper & 0x7fff)
#define SIGND(fp)       ((fp.l.upper) & 0x8000)
#define MANTD(fp)       (fp.l.lower)
#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
static inline void fpush(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fptags[env->fpstt] = 0; /* validate stack entry */
}
static inline void fpop(void)
{
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
}
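
/* The x87 register file behaves as an 8-entry circular stack indexed by
   fpstt; fptags[] marks each slot as valid (0) or empty (1). */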
static inline floatx80 helper_fldt(target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.l.lower = ldq(ptr);
    temp.l.upper = lduw(ptr + 8);
    return temp.d;
}
static inline void helper_fstt(floatx80 f, target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.d = f;
    stq(ptr, temp.l.lower);
    stw(ptr + 8, temp.l.upper);
}
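
/* An x86 long double occupies 10 bytes in memory: a 64-bit mantissa
   followed by a 16-bit sign/exponent word, hence the ldq/lduw pair. */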
#define FPUS_IE (1 << 0)
#define FPUS_DE (1 << 1)
#define FPUS_ZE (1 << 2)
#define FPUS_OE (1 << 3)
#define FPUS_UE (1 << 4)
#define FPUS_PE (1 << 5)
#define FPUS_SF (1 << 6)
#define FPUS_SE (1 << 7)
#define FPUS_B  (1 << 15)
static inline uint32_t compute_eflags(void)
{
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
}
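
/* Condition codes are computed lazily: env->eflags holds only the
   non-arithmetic bits, and helper_cc_compute_all() reconstructs
   CF/PF/AF/ZF/SF/OF from CC_SRC/CC_DST according to CC_OP. */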
/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void load_eflags(int eflags, int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}
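
/* DF is stored as a +1/-1 increment rather than as a flag bit: EFLAGS
   bit 10 (the direction flag) maps to -1 when set, +1 when clear. */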
/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUState *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code);
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
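
/* Each entry is CC_P exactly when the index has an even number of set
   bits, matching the x86 definition of PF over the low result byte. */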
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
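
/* RCL/RCR rotate through CF, so a 16-bit rotate has period 17 and an
   8-bit one period 9; these tables reduce the shift count accordingly. */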
#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;

    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
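
/* A descriptor scatters the 32-bit base across e1[31:16], e2[7:0] and
   e2[31:24]; the 20-bit limit sits in e1[15:0] and e2[19:16], scaled to
   4K pages when the granularity bit (DESC_G_MASK) is set. */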
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
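
/* A 32-bit TSS (shift == 1) keeps the SS:ESP pair for privilege level
   dpl at offset 4 + 8 * dpl; a 16-bit TSS keeps SS:SP at 2 + 4 * dpl. */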
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
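
/* Task switching keeps the TSS descriptor busy bits consistent: the old
   TSS is marked non-busy on JMP/IRET and the new one busy on JMP/CALL,
   so a later IRET can return through the back link written above. */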
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
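
/* The I/O permission bitmap starts at the 16-bit offset stored at
   TSS+0x66; each port is one bit, and an access of `size` bytes is
   allowed only if all corresponding bits are clear. */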
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
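
/* On x86_64 the stack pointer may be a full 64-bit RSP, which the
   generic mask-and-merge form would truncate; the 64-bit variant
   special-cases the 16- and 32-bit masks to keep RSP intact. */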
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
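
/* Resulting frame, from higher to lower addresses: GS FS DS ES (vm86
   only), SS ESP (stack switch only), EFLAGS CS EIP, error code; each
   slot is 2 or 4 bytes wide depending on the gate size. */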
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
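
/* The 64-bit TSS stores RSP0-RSP2 at offsets 4/12/20 and IST1-IST7 from
   offset 36; 8 * level + 4 covers both because callers pass ist + 3,
   which skips the reserved slot between the two groups. */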
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
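
/* Unlike 32-bit mode, long mode always pushes SS:RSP, aligns the new
   stack to 16 bytes, and loads a null SS on privilege change. */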
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
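
/* SYSCALL takes CS/SS directly from MSR_STAR bits 47:32 with fixed flat
   attributes; no descriptor table lookup is performed. */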
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#else
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void do_interrupt(CPUState *env1)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}
void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
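
/* Contributory exceptions are vector 0 (#DE) and vectors 10-13 (#TS,
   #NP, #SS, #GP). A contributory fault after a contributory fault, or a
   contributory/#PF fault while delivering #PF, escalates to #DF; a
   fault during #DF delivery is a triple fault (shutdown/reset). */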
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
1490 static void QEMU_NORETURN
raise_exception_err(int exception_index
,
1493 raise_interrupt(exception_index
, 0, error_code
, 0);
1496 void raise_exception_err_env(CPUState
*nenv
, int exception_index
,
1500 raise_interrupt(exception_index
, 0, error_code
, 0);
1503 static void QEMU_NORETURN
raise_exception(int exception_index
)
1505 raise_interrupt(exception_index
, 0, 0, 0);
1508 void raise_exception_env(int exception_index
, CPUState
*nenv
)
1511 raise_exception(exception_index
);
1515 #if defined(CONFIG_USER_ONLY)
1517 void do_smm_enter(CPUState
*env1
)
1521 void helper_rsm(void)
1527 #ifdef TARGET_X86_64
1528 #define SMM_REVISION_ID 0x00020064
1530 #define SMM_REVISION_ID 0x00020000
void do_smm_enter(CPUState *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUState *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}
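
/* On SMM entry the state-save area lives in SMRAM at smbase + 0x8000
   and execution resumes at CS base = smbase, EIP = 0x8000, in a flat,
   paging-disabled environment with the flags cleared. */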
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}
void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
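
/* In long mode, system descriptors such as the LDT and TSS grow to 16
   bytes, the extra doubleword holding base bits 63:32; hence
   entry_limit is 15 there instead of 7. */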
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
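/* A selector decomposes as index << 3 | TI << 2 | RPL.  For example
   selector 0x2b is GDT entry 5 (0x2b & ~7 = 0x28) with RPL 3; the
   "& 0xfffc" in the fault codes strips RPL and TI, and the
   "(selector & 0xfffc) == 0" tests catch the null selectors 0..3. */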
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
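/* In real mode there are no descriptors: the CS cache is written
   directly and the base is always selector << 4, so a far call to
   1234:0010 lands at linear address 0x12340 + 0x10 = 0x12350. */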
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
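/* For a call gate to an inner ring the stack is switched to the
   SS:ESP fetched from the TSS for the target DPL, then the old SS and
   ESP are re-pushed and param_count words are copied from the old
   stack.  With a 32-bit gate and param_count=2 the new stack receives
   old SS, old ESP, two parameters, CS and EIP: six dwords in total. */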
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
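/* Stack frame consumed above, lowest address first: EIP, CS,
   EFLAGS (iret only), then ESP and SS when the privilege level
   changes.  A 32-bit iret from ring 0 to ring 3 therefore pops five
   dwords, and the vm86 return path pops ES, DS, FS and GS as well. */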
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
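/* SYSENTER/SYSEXIT derive all flat segments from the single
   MSR_IA32_SYSENTER_CS value: kernel CS at sysenter_cs and kernel SS
   at +8 on entry; on exit, user CS at +16 and user SS at +24 for the
   32-bit path (+32/+40 for a 64-bit sysexit), with RPL forced to 3. */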
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
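/* The mask arithmetic keeps PE sticky: cr[0] & ~0xe preserves bit 0
   and everything above bit 3, and t0 & 0xf can only OR bits back in.
   E.g. with CR0 = 0x11 (PE|ET), lmsw 0 still yields 0x11. */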
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}
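/* The 64-bit counter is split across EDX:EAX, e.g. a TSC value of
   0x123456789a returns EAX = 0x3456789a and EDX = 0x12; rdtscp
   additionally returns the TSC_AUX MSR in ECX. */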
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
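    /* Only EFER bits whose CPUID feature flag is present can be
       written; everything else is silently preserved.  E.g. without
       the NX CPUID bit, writing MSR_EFER_NXE leaves env->efer
       unchanged because the bit never enters update_mask. */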
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
/* x87 FPU helpers */

static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
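/* fpu_set_exception only latches the fault: the status bit is always
   set, and the summary/busy bits are raised when the exception is
   unmasked in FPUC, to be delivered by a later fwait.  E.g. a divide
   by zero with ZE masked (the fninit default, fpuc = 0x37f) just sets
   FPUS_ZE and execution continues with the IEEE result. */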
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx80(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx80(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx80(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx80_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx80_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}
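/* The 16-bit stores clamp out-of-range results to the x87 integer
   indefinite value: any result that does not round-trip through
   int16_t becomes -32768 (0x8000), which is what fist/fistt write on
   overflow. */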
void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    floatx80 tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
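/* floatx80_compare returns -1/0/1/2 for less/equal/greater/unordered,
   so ccval[ret + 1] maps these to C0, C3, nothing and C0|C2|C3 for
   the FPU status word variants (0x0100, 0x4000, 0, 0x4500), or to
   CF, ZF, nothing and ZF|PF|CF for the fcomi EFLAGS variants. */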
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}

void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}

void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}

void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}

void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}

void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}

void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}

void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}
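/* Control word layout: rounding control in bits 11:10 and precision
   control in bits 9:8.  E.g. fldcw(0x077f) selects round-down with
   80-bit precision, while the fninit default 0x037f below is
   round-to-nearest, 80-bit, all exceptions masked. */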
void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    floatx80 tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = int64_to_floatx80(val, &env->fp_status);
    if (ldub(ptr + 9) & 0x80) {
        tmp = floatx80_chs(tmp);
    }
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
void helper_f2xm1(void)
{
    double val = floatx80_to_double(ST0);
    val = pow(2.0, val) - 1.0;
    ST0 = double_to_floatx80(val);
}

void helper_fyl2x(void)
{
    double fptemp = floatx80_to_double(ST0);

    if (fptemp > 0.0) {
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    double fptemp = floatx80_to_double(ST0);

    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_floatx80(fptemp);
        fpush();
        ST0 = floatx80_one;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = floatx80_to_double(ST1);
    fptemp = floatx80_to_double(ST0);
    ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
    fpop();
}

void helper_fxtract(void)
{
    CPU_LDoubleU temp;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;
        /*DP exponent bias*/
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop /*ST0*/ / fptemp /*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop /*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
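/* fprem computes only a partial remainder: when the exponents differ
   by 53 or more it reduces ST0 by a partial quotient and sets C2, so
   guests loop "fprem; fnstsw; test C2; jnz" until C2 clears.  On the
   final pass the low three quotient bits are reported in C0/C3/C1. */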
void helper_fyl2xp1(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
void helper_fsqrt(void)
{
    if (floatx80_is_neg(ST0)) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx80_sqrt(ST0, &env->fp_status);
}

void helper_fsincos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        fpush();
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
}
void helper_fscale(void)
{
    if (floatx80_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
    }
}

void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
                       || (mant & (1LL << 63)) == 0
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
void helper_fsave(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
#endif
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf)
        raise_exception(EXCP0D_GPF);

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf)
        raise_exception(EXCP0D_GPF);

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
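/* FXSAVE/FXRSTOR image layout (512 bytes): FCW/FSW at +0/+2, the
   abridged tag byte at +4, MXCSR at +0x18, the eight x87 registers in
   16-byte slots from +0x20 and the XMM registers from +0xa0.  The tag
   is one bit per register (1 = valid, hence the ^ 0xff), unlike the
   two-bit tags used by fstenv. */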
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
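/* Classic restoring shift-subtract division: each of the 64 rounds
   shifts the 128-bit remainder left by one bit, subtracts b when it
   fits and shifts the quotient bit into a0.  After 64 rounds a0 holds
   the quotient and a1 the remainder, e.g. 2^64 / 3 gives
   q = 0x5555555555555555, r = 1. */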
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif
/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void svm_check_intercept(CPUState *env1, uint32_t type)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
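/* The VMCB keeps segment attributes in AMD's compressed 12-bit
   format: descriptor flag bits 8..15 map to attrib bits 0..7 and
   flag bits 20..23 to attrib bits 8..11.  The two helpers above
   convert to and from the e2-style flags held in SegmentCache, e.g.
   flags 0x00c09300 <-> attrib 0x0c93. */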
5108 void helper_vmrun(int aflag
, int next_eip_addend
)
5114 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
5119 addr
= (uint32_t)EAX
;
5121 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
5123 env
->vm_vmcb
= addr
;
5125 /* save the current CPU state in the hsave page */
5126 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5127 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5129 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5130 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5132 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5133 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5134 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5135 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5136 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5137 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5139 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5140 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5142 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5144 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5146 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5148 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5151 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
5152 EIP
+ next_eip_addend
);
5153 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5154 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5156 /* load the interception bitmaps so we do not need to access the
5158 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
5159 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
5160 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
5161 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
5162 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
5163 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
5165 /* enable intercepts */
5166 env
->hflags
|= HF_SVMI_MASK
;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt_all(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
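
/* The sequence above follows the AMD SVM world switch: snapshot the host
   into the hsave area, cache the intercept bitmaps, load the guest state
   from the VMCB, set GIF, and finally perform any requested event
   injection before the first guest instruction runs. */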

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
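
/* If VMMCALL is not intercepted there is no hypervisor behind it, so it
   raises #UD -- the same behaviour the hardware documents for an
   unintercepted VMMCALL. */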

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
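
/* VMLOAD (and VMSAVE below) transfer exactly the state that VMRUN/#VMEXIT
   leave alone: FS, GS, TR, LDTR and the syscall/sysenter MSRs. */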

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
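
/* MSR permission map layout: two bits per MSR (read, then write), with the
   0xc0000000- and 0xc0010000-based ranges placed after the first 8192
   low-MSR entries.  Worked example (illustrative): for EFER
   (ECX = 0xc0000080), t0 = (8192 + 0x80) * 2 = 16640, giving byte
   t1 = 2080 and bit t0 = 0, so a read tests bit 0 and a write (param = 1)
   tests bit 1 of MSRPM byte 2080. */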

void svm_check_intercept(CPUState *env1, uint32_t type)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
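
/* The IOPM holds one bit per port; (param >> 4) & 7 is the access size in
   bytes, so "mask" covers every port the access touches.  Worked example
   (illustrative): a 4-byte access to port 0x71 reads the 16-bit word at
   IOPM offset 0x71 / 8 = 14 and tests bits 1..4 (mask 0xf shifted by
   0x71 & 7 = 1), i.e. ports 0x71-0x74. */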

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
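
/* Note that helper_vmexit() never returns to its caller: cpu_loop_exit()
   longjmps back to the emulator's main loop, so an intercepted instruction
   is abandoned at the point of the #VMEXIT. */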

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
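
/* The SHIFT/#include idiom above instantiates one copy of the template per
   operand size: ops_sse.h once for the 64-bit MMX forms and once for the
   128-bit SSE forms, and helper_template.h once per integer width
   (byte/word/dword, plus qword on 64-bit targets) to generate the per-size
   helpers and lazy-flag evaluators used below. */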

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
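
/* Worked examples (illustrative): helper_lzcnt(0x0010, 16) scans from the
   target_ulong MSB, finds the highest set bit at position 4 and returns
   16 - 1 - 4 = 11, the 16-bit LZCNT result; helper_bsr(0x0010) uses
   wordsize 0 and returns the bit index itself, 4. */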

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
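
/* Lazy flag evaluation: flag-setting instructions only record their
   operands in CC_SRC/CC_DST and the operation kind in CC_OP; the
   compute_all_*/compute_c_* evaluators below rebuild EFLAGS on demand.
   E.g. after a 32-bit add, CC_OP is CC_OP_ADDL, CC_SRC holds one operand
   and CC_DST the result, which is enough to rederive CF, OF, etc.
   CC_OP_EFLAGS means the flags are already materialized in CC_SRC. */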

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t cpu_cc_compute_all(CPUState *env1, int op)
{
    CPUState *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}