/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS

#include "host-utils.h"

#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
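/* NB: parity_table[v] is CC_P exactly when the byte v contains an even
   number of set bits, which is how the x86 PF flag is defined; the BCD
   helpers below simply index this table with the 8-bit result. */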
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
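/* NB: rclw_table and rclb_table reduce a rotate count modulo 17 and
   modulo 9 respectively: RCL/RCR rotate through CF, so a 16-bit operand
   effectively has 17 bit positions and an 8-bit operand has 9. */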
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
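/* NB: these long-double values are the constants loaded by the x87
   constant-load instructions (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E,
   FLDL2T), indexed in that order by the FPU constant-load helper. */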
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
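/* NB: e1/e2 are the two 32-bit halves of a segment descriptor: the limit is
   split between e1[15:0] and e2[19:16] (scaled by 4K when the granularity
   bit DESC_G_MASK is set), and the base is split between e1[31:16], e2[7:0]
   and e2[31:24]. */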
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
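/* NB: for a 32-bit TSS (shift == 1) the SS:ESP pair for ring dpl sits at
   offset 8 * dpl + 4 as a 32-bit ESP followed by a 16-bit SS; a 16-bit TSS
   (shift == 0) packs them as 16-bit SP/SS pairs at offset 4 * dpl + 2, which
   is what the "(dpl * 4 + 2) << shift" indexing above encodes. */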
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
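/* switch_tss() below implements a hardware task switch; the source value
   records whether the switch comes from a jmp, from a call/interrupt (which
   links the tasks through the back link and the NT flag) or from an iret
   (which clears NT), since that determines how the busy bits and the old
   TSS image are updated. */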
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;
    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }
    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set the busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
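/* NB: the 16-bit word at offset 0x66 of a 32-bit TSS is the base of the I/O
   permission bitmap, one bit per port; an access of 'size' bytes at port
   'addr' is allowed only if all 'size' consecutive bits starting at 'addr'
   are clear, which is why two bytes are always loaded for the check. */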
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
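/* Example: with ssp = 0xfffff000 and sp = 0x2000 on a 16-bit stack
   (sp_mask = 0xffff), the untruncated sum 0x100001000 escapes the 32-bit
   address space; SEG_ADDL keeps only the low 32 bits (0x1000), which is the
   address a real 32-bit CPU would generate. */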
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
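/* NB: only the masked part of sp is added to the segment base, so with a
   16-bit stack (sp_mask = 0xffff) pushes and pops wrap inside the segment
   as on real hardware, while the caller keeps the full sp value and commits
   it with SET_ESP() once the whole frame has been written. */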
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8: case 10: case 11: case 12: case 13: case 14: case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl; /* force dpl to cpl */
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
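/* NB: in the 64-bit TSS the RSP0..RSP2 slots start at offset 4 and the
   IST1..IST7 slots follow at offset 0x24, each 8 bytes wide; that is what
   the "8 * level + 4" indexing above relies on, with IST entries selected
   by passing level = ist + 3 from the caller. */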
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8: case 10: case 11: case 12: case 13: case 14: case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl; /* force dpl to cpl */
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
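/* NB: vectors 0 (#DE) and 10..13 (#TS, #NP, #SS, #GP) are the contributory
   exceptions: a contributory fault raised while delivering another
   contributory fault, or anything contributory or #PF raised while
   delivering a #PF, escalates to #DF; a further fault while delivering #DF
   is a triple fault, handled above with cpu_abort(). */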
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void raise_interrupt(int intno, int is_int, int error_code,
                            int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
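/* NB: bit 17 (0x20000) of the SMM revision ID advertises SMBASE relocation
   support; helper_rsm() below checks the same bit in the saved state image
   before it accepts a new SMBASE value. */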
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
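/* NB: like the hardware instruction, cmpxchg8b performs a store even when
   the comparison fails (writing back the value that was read), so memory
   faults and dirty-page behaviour are identical on both paths; only ZF
   distinguishes success from failure. */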
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
- 1; i
>= 0; i
--) {
2431 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2432 PUSHL(ssp
, sp
, sp_mask
, val
);
2435 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2436 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2437 for(i
= param_count
- 1; i
>= 0; i
--) {
2438 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2439 PUSHW(ssp
, sp
, sp_mask
, val
);
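            /* Note on the two copy loops above: on a call through a gate to
               an inner privilege level, param_count entries (dwords for a
               386 gate, words for a 286 gate) are copied from the caller's
               stack to the new inner stack after the old SS:ESP have been
               pushed there, so the callee finds its arguments at the same
               offsets as the caller left them. */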
2444 /* to same privilege */
2446 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2447 ssp
= env
->segs
[R_SS
].base
;
2448 // push_size = (4 << shift);
2453 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2454 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2456 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2457 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2460 /* from this point, not restartable */
2463 ss
= (ss
& ~3) | dpl
;
2464 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2466 get_seg_limit(ss_e1
, ss_e2
),
2470 selector
= (selector
& ~3) | dpl
;
2471 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2472 get_seg_base(e1
, e2
),
2473 get_seg_limit(e1
, e2
),
2475 cpu_x86_set_cpl(env
, dpl
);
2476 SET_ESP(sp
, sp_mask
);
2480 if (kqemu_is_ok(env
)) {
2481 env
->exception_index
= -1;
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
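
/* Note: in vm86 mode IOPL is deliberately left out of eflags_mask, so a vm86
   task cannot raise its own IOPL with a doctored FLAGS image; with shift == 0
   the mask is further truncated to 16 bits because a 16-bit IRET only pops
   FLAGS, leaving AC, ID and the other upper bits untouched. */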
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
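
/* validate_seg() is used on the protected mode return path below: when
   RET/IRET moves to an outer privilege level, any data or non-conforming
   code segment register whose DPL is more privileged than the new CPL is
   reloaded with a NULL selector, so the less privileged code cannot keep
   using it. */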
2545 /* protected mode iret */
2546 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2548 uint32_t new_cs
, new_eflags
, new_ss
;
2549 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2550 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2551 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2552 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2554 #ifdef TARGET_X86_64
2559 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2561 ssp
= env
->segs
[R_SS
].base
;
2562 new_eflags
= 0; /* avoid warning */
2563 #ifdef TARGET_X86_64
2569 POPQ(sp
, new_eflags
);
2575 POPL(ssp
, sp
, sp_mask
, new_eip
);
2576 POPL(ssp
, sp
, sp_mask
, new_cs
);
2579 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2580 if (new_eflags
& VM_MASK
)
2581 goto return_to_vm86
;
2585 POPW(ssp
, sp
, sp_mask
, new_eip
);
2586 POPW(ssp
, sp
, sp_mask
, new_cs
);
2588 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2591 if (loglevel
& CPU_LOG_PCALL
) {
2592 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2593 new_cs
, new_eip
, shift
, addend
);
2594 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2597 if ((new_cs
& 0xfffc) == 0)
2598 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2599 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2600 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2601 if (!(e2
& DESC_S_MASK
) ||
2602 !(e2
& DESC_CS_MASK
))
2603 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2604 cpl
= env
->hflags
& HF_CPL_MASK
;
2607 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2608 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2609 if (e2
& DESC_C_MASK
) {
2611 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2614 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2616 if (!(e2
& DESC_P_MASK
))
2617 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2620 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2621 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2622 /* return to same privilege level */
2623 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2624 get_seg_base(e1
, e2
),
2625 get_seg_limit(e1
, e2
),
2628 /* return to different privilege level */
2629 #ifdef TARGET_X86_64
2638 POPL(ssp
, sp
, sp_mask
, new_esp
);
2639 POPL(ssp
, sp
, sp_mask
, new_ss
);
2643 POPW(ssp
, sp
, sp_mask
, new_esp
);
2644 POPW(ssp
, sp
, sp_mask
, new_ss
);
2647 if (loglevel
& CPU_LOG_PCALL
) {
2648 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2652 if ((new_ss
& 0xfffc) == 0) {
2653 #ifdef TARGET_X86_64
2654 /* NULL ss is allowed in long mode if cpl != 3*/
2655 /* XXX: test CS64 ? */
2656 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2657 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2659 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2660 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2661 DESC_W_MASK
| DESC_A_MASK
);
2662 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2666 raise_exception_err(EXCP0D_GPF
, 0);
2669 if ((new_ss
& 3) != rpl
)
2670 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2671 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2672 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2673 if (!(ss_e2
& DESC_S_MASK
) ||
2674 (ss_e2
& DESC_CS_MASK
) ||
2675 !(ss_e2
& DESC_W_MASK
))
2676 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2677 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2679 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2680 if (!(ss_e2
& DESC_P_MASK
))
2681 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2682 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2683 get_seg_base(ss_e1
, ss_e2
),
2684 get_seg_limit(ss_e1
, ss_e2
),
2688 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2689 get_seg_base(e1
, e2
),
2690 get_seg_limit(e1
, e2
),
2692 cpu_x86_set_cpl(env
, rpl
);
2694 #ifdef TARGET_X86_64
2695 if (env
->hflags
& HF_CS64_MASK
)
2699 sp_mask
= get_sp_mask(ss_e2
);
2701 /* validate data segments */
2702 validate_seg(R_ES
, rpl
);
2703 validate_seg(R_DS
, rpl
);
2704 validate_seg(R_FS
, rpl
);
2705 validate_seg(R_GS
, rpl
);
2709 SET_ESP(sp
, sp_mask
);
2712 /* NOTE: 'cpl' is the _old_ CPL */
2713 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2715 eflags_mask
|= IOPL_MASK
;
2716 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2718 eflags_mask
|= IF_MASK
;
2720 eflags_mask
&= 0xffff;
2721 load_eflags(new_eflags
, eflags_mask
);
2726 POPL(ssp
, sp
, sp_mask
, new_esp
);
2727 POPL(ssp
, sp
, sp_mask
, new_ss
);
2728 POPL(ssp
, sp
, sp_mask
, new_es
);
2729 POPL(ssp
, sp
, sp_mask
, new_ds
);
2730 POPL(ssp
, sp
, sp_mask
, new_fs
);
2731 POPL(ssp
, sp
, sp_mask
, new_gs
);
2733 /* modify processor state */
2734 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2735 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2736 load_seg_vm(R_CS
, new_cs
& 0xffff);
2737 cpu_x86_set_cpl(env
, 3);
2738 load_seg_vm(R_SS
, new_ss
& 0xffff);
2739 load_seg_vm(R_ES
, new_es
& 0xffff);
2740 load_seg_vm(R_DS
, new_ds
& 0xffff);
2741 load_seg_vm(R_FS
, new_fs
& 0xffff);
2742 load_seg_vm(R_GS
, new_gs
& 0xffff);
2744 env
->eip
= new_eip
& 0xffff;
2748 void helper_iret_protected(int shift
, int next_eip
)
2750 int tss_selector
, type
;
2753 /* specific case for TSS */
2754 if (env
->eflags
& NT_MASK
) {
2755 #ifdef TARGET_X86_64
2756 if (env
->hflags
& HF_LMA_MASK
)
2757 raise_exception_err(EXCP0D_GPF
, 0);
2759 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2760 if (tss_selector
& 4)
2761 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2762 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2763 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2764 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2765 /* NOTE: we check both segment and busy TSS */
2767 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2768 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2770 helper_ret_protected(shift
, 1, 0);
2772 env
->hflags2
&= ~HF2_NMI_MASK
;
2774 if (kqemu_is_ok(env
)) {
2775 CC_OP
= CC_OP_EFLAGS
;
2776 env
->exception_index
= -1;
2782 void helper_lret_protected(int shift
, int addend
)
2784 helper_ret_protected(shift
, 0, addend
);
2786 if (kqemu_is_ok(env
)) {
2787 env
->exception_index
= -1;
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
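
/* Selector layout implied by the two helpers above: every code and stack
   selector is derived from MSR_IA32_SYSENTER_CS (env->sysenter_cs).
     SYSENTER:          CS = sysenter_cs,      SS = sysenter_cs + 8
     SYSEXIT, 32-bit:   CS = sysenter_cs + 16, SS = sysenter_cs + 24 (RPL 3)
     SYSEXIT, 64-bit:   CS = sysenter_cs + 32, SS = sysenter_cs + 40 (RPL 3) */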
2871 #if defined(CONFIG_USER_ONLY)
2872 target_ulong
helper_read_crN(int reg
)
2877 void helper_write_crN(int reg
, target_ulong t0
)
2881 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2885 target_ulong
helper_read_crN(int reg
)
2889 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2895 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2896 val
= cpu_get_apic_tpr(env
);
2905 void helper_write_crN(int reg
, target_ulong t0
)
2907 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2910 cpu_x86_update_cr0(env
, t0
);
2913 cpu_x86_update_cr3(env
, t0
);
2916 cpu_x86_update_cr4(env
, t0
);
2919 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2920 cpu_set_apic_tpr(env
, t0
);
2922 env
->v_tpr
= t0
& 0x0f;
2930 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2935 hw_breakpoint_remove(env
, reg
);
2937 hw_breakpoint_insert(env
, reg
);
2938 } else if (reg
== 7) {
2939 for (i
= 0; i
< 4; i
++)
2940 hw_breakpoint_remove(env
, i
);
2942 for (i
= 0; i
< 4; i
++)
2943 hw_breakpoint_insert(env
, i
);
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
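
/* Illustrative sketch (hypothetical helper, not used above): the CR0 value
   produced by LMSW.  Only MP, EM and TS are taken from the source operand;
   PE can be set but never cleared, e.g. cr0 = 0x1, msw = 0x0 still yields
   (0x1 & ~0xe) | (0x0 & 0xf) = 0x1. */
static inline target_ulong lmsw_new_cr0_sketch(target_ulong cr0, target_ulong msw)
{
    return (cr0 & ~0xe) | (msw & 0xf);
}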
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
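
/* Illustrative sketch (hypothetical helper): RDTSC and RDMSR return their
   64-bit result split across EDX:EAX, which is what the two assignments
   above do. */
static inline void split_edx_eax_sketch(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;           /* low 32 bits */
    *edx = (uint32_t)(val >> 32);   /* high 32 bits */
}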
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
2994 #if defined(CONFIG_USER_ONLY)
2995 void helper_wrmsr(void)
2999 void helper_rdmsr(void)
3003 void helper_wrmsr(void)
3007 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3009 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3011 switch((uint32_t)ECX
) {
3012 case MSR_IA32_SYSENTER_CS
:
3013 env
->sysenter_cs
= val
& 0xffff;
3015 case MSR_IA32_SYSENTER_ESP
:
3016 env
->sysenter_esp
= val
;
3018 case MSR_IA32_SYSENTER_EIP
:
3019 env
->sysenter_eip
= val
;
3021 case MSR_IA32_APICBASE
:
3022 cpu_set_apic_base(env
, val
);
3026 uint64_t update_mask
;
3028 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3029 update_mask
|= MSR_EFER_SCE
;
3030 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3031 update_mask
|= MSR_EFER_LME
;
3032 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3033 update_mask
|= MSR_EFER_FFXSR
;
3034 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3035 update_mask
|= MSR_EFER_NXE
;
3036 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3037 update_mask
|= MSR_EFER_SVME
;
3038 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3039 (val
& update_mask
));
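        /* Note: update_mask is built from the CPUID feature bits above, so a
           guest WRMSR to EFER can only touch the bits (SCE, LME, FFXSR, NXE,
           SVME) that this virtual CPU actually advertises; all other EFER
           bits are carried over unchanged:
               new_efer = (old_efer & ~update_mask) | (val & update_mask); */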
3048 case MSR_VM_HSAVE_PA
:
3049 env
->vm_hsave
= val
;
3051 #ifdef TARGET_X86_64
3062 env
->segs
[R_FS
].base
= val
;
3065 env
->segs
[R_GS
].base
= val
;
3067 case MSR_KERNELGSBASE
:
3068 env
->kernelgsbase
= val
;
3072 /* XXX: exception ? */
3077 void helper_rdmsr(void)
3081 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3083 switch((uint32_t)ECX
) {
3084 case MSR_IA32_SYSENTER_CS
:
3085 val
= env
->sysenter_cs
;
3087 case MSR_IA32_SYSENTER_ESP
:
3088 val
= env
->sysenter_esp
;
3090 case MSR_IA32_SYSENTER_EIP
:
3091 val
= env
->sysenter_eip
;
3093 case MSR_IA32_APICBASE
:
3094 val
= cpu_get_apic_base(env
);
3105 case MSR_VM_HSAVE_PA
:
3106 val
= env
->vm_hsave
;
3108 case MSR_IA32_PERF_STATUS
:
3109 /* tsc_increment_by_tick */
3111 /* CPU multiplier */
3112 val
|= (((uint64_t)4ULL) << 40);
3114 #ifdef TARGET_X86_64
3125 val
= env
->segs
[R_FS
].base
;
3128 val
= env
->segs
[R_GS
].base
;
3130 case MSR_KERNELGSBASE
:
3131 val
= env
->kernelgsbase
;
3135 case MSR_QPI_COMMBASE
:
3136 if (env
->kqemu_enabled
) {
3137 val
= kqemu_comm_base
;
3144 /* XXX: exception ? */
3148 EAX
= (uint32_t)(val
);
3149 EDX
= (uint32_t)(val
>> 32);
3153 target_ulong
helper_lsl(target_ulong selector1
)
3156 uint32_t e1
, e2
, eflags
, selector
;
3157 int rpl
, dpl
, cpl
, type
;
3159 selector
= selector1
& 0xffff;
3160 eflags
= helper_cc_compute_all(CC_OP
);
3161 if (load_segment(&e1
, &e2
, selector
) != 0)
3164 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3165 cpl
= env
->hflags
& HF_CPL_MASK
;
3166 if (e2
& DESC_S_MASK
) {
3167 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3170 if (dpl
< cpl
|| dpl
< rpl
)
3174 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3185 if (dpl
< cpl
|| dpl
< rpl
) {
3187 CC_SRC
= eflags
& ~CC_Z
;
3191 limit
= get_seg_limit(e1
, e2
);
3192 CC_SRC
= eflags
| CC_Z
;
3196 target_ulong
helper_lar(target_ulong selector1
)
3198 uint32_t e1
, e2
, eflags
, selector
;
3199 int rpl
, dpl
, cpl
, type
;
3201 selector
= selector1
& 0xffff;
3202 eflags
= helper_cc_compute_all(CC_OP
);
3203 if ((selector
& 0xfffc) == 0)
3205 if (load_segment(&e1
, &e2
, selector
) != 0)
3208 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3209 cpl
= env
->hflags
& HF_CPL_MASK
;
3210 if (e2
& DESC_S_MASK
) {
3211 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3214 if (dpl
< cpl
|| dpl
< rpl
)
3218 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3232 if (dpl
< cpl
|| dpl
< rpl
) {
3234 CC_SRC
= eflags
& ~CC_Z
;
3238 CC_SRC
= eflags
| CC_Z
;
3239 return e2
& 0x00f0ff00;
3242 void helper_verr(target_ulong selector1
)
3244 uint32_t e1
, e2
, eflags
, selector
;
3247 selector
= selector1
& 0xffff;
3248 eflags
= helper_cc_compute_all(CC_OP
);
3249 if ((selector
& 0xfffc) == 0)
3251 if (load_segment(&e1
, &e2
, selector
) != 0)
3253 if (!(e2
& DESC_S_MASK
))
3256 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3257 cpl
= env
->hflags
& HF_CPL_MASK
;
3258 if (e2
& DESC_CS_MASK
) {
3259 if (!(e2
& DESC_R_MASK
))
3261 if (!(e2
& DESC_C_MASK
)) {
3262 if (dpl
< cpl
|| dpl
< rpl
)
3266 if (dpl
< cpl
|| dpl
< rpl
) {
3268 CC_SRC
= eflags
& ~CC_Z
;
3272 CC_SRC
= eflags
| CC_Z
;
3275 void helper_verw(target_ulong selector1
)
3277 uint32_t e1
, e2
, eflags
, selector
;
3280 selector
= selector1
& 0xffff;
3281 eflags
= helper_cc_compute_all(CC_OP
);
3282 if ((selector
& 0xfffc) == 0)
3284 if (load_segment(&e1
, &e2
, selector
) != 0)
3286 if (!(e2
& DESC_S_MASK
))
3289 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3290 cpl
= env
->hflags
& HF_CPL_MASK
;
3291 if (e2
& DESC_CS_MASK
) {
3294 if (dpl
< cpl
|| dpl
< rpl
)
3296 if (!(e2
& DESC_W_MASK
)) {
3298 CC_SRC
= eflags
& ~CC_Z
;
3302 CC_SRC
= eflags
| CC_Z
;
3305 /* x87 FPU helpers */
3307 static void fpu_set_exception(int mask
)
3310 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3311 env
->fpus
|= FPUS_SE
| FPUS_B
;
3314 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3317 fpu_set_exception(FPUS_ZE
);
3321 static void fpu_raise_exception(void)
3323 if (env
->cr
[0] & CR0_NE_MASK
) {
3324 raise_exception(EXCP10_COPR
);
3326 #if !defined(CONFIG_USER_ONLY)
3333 void helper_flds_FT0(uint32_t val
)
3340 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3343 void helper_fldl_FT0(uint64_t val
)
3350 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3353 void helper_fildl_FT0(int32_t val
)
3355 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3358 void helper_flds_ST0(uint32_t val
)
3365 new_fpstt
= (env
->fpstt
- 1) & 7;
3367 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3368 env
->fpstt
= new_fpstt
;
3369 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3372 void helper_fldl_ST0(uint64_t val
)
3379 new_fpstt
= (env
->fpstt
- 1) & 7;
3381 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3382 env
->fpstt
= new_fpstt
;
3383 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3386 void helper_fildl_ST0(int32_t val
)
3389 new_fpstt
= (env
->fpstt
- 1) & 7;
3390 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3391 env
->fpstt
= new_fpstt
;
3392 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3395 void helper_fildll_ST0(int64_t val
)
3398 new_fpstt
= (env
->fpstt
- 1) & 7;
3399 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3400 env
->fpstt
= new_fpstt
;
3401 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3404 uint32_t helper_fsts_ST0(void)
3410 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3414 uint64_t helper_fstl_ST0(void)
3420 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3424 int32_t helper_fist_ST0(void)
3427 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3428 if (val
!= (int16_t)val
)
3433 int32_t helper_fistl_ST0(void)
3436 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3440 int64_t helper_fistll_ST0(void)
3443 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3447 int32_t helper_fistt_ST0(void)
3450 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3451 if (val
!= (int16_t)val
)
3456 int32_t helper_fisttl_ST0(void)
3459 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3463 int64_t helper_fisttll_ST0(void)
3466 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3470 void helper_fldt_ST0(target_ulong ptr
)
3473 new_fpstt
= (env
->fpstt
- 1) & 7;
3474 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3475 env
->fpstt
= new_fpstt
;
3476 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3479 void helper_fstt_ST0(target_ulong ptr
)
3481 helper_fstt(ST0
, ptr
);
3484 void helper_fpush(void)
3489 void helper_fpop(void)
3494 void helper_fdecstp(void)
3496 env
->fpstt
= (env
->fpstt
- 1) & 7;
3497 env
->fpus
&= (~0x4700);
3500 void helper_fincstp(void)
3502 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3503 env
->fpus
&= (~0x4700);
3508 void helper_ffree_STN(int st_index
)
3510 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3513 void helper_fmov_ST0_FT0(void)
3518 void helper_fmov_FT0_STN(int st_index
)
3523 void helper_fmov_ST0_STN(int st_index
)
3528 void helper_fmov_STN_ST0(int st_index
)
3533 void helper_fxchg_ST0_STN(int st_index
)
3541 /* FPU operations */
3543 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3545 void helper_fcom_ST0_FT0(void)
3549 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3550 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3553 void helper_fucom_ST0_FT0(void)
3557 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3558 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3561 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3563 void helper_fcomi_ST0_FT0(void)
3568 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3569 eflags
= helper_cc_compute_all(CC_OP
);
3570 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3574 void helper_fucomi_ST0_FT0(void)
3579 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3580 eflags
= helper_cc_compute_all(CC_OP
);
3581 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}
3664 void helper_fld1_ST0(void)
3669 void helper_fldl2t_ST0(void)
3674 void helper_fldl2e_ST0(void)
3679 void helper_fldpi_ST0(void)
3684 void helper_fldlg2_ST0(void)
3689 void helper_fldln2_ST0(void)
3694 void helper_fldz_ST0(void)
3699 void helper_fldz_FT0(void)
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
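
/* Illustrative sketch (hypothetical helper): the inverse of the packing done
   by helper_fnstsw() above; the TOP-of-stack field lives in bits 11..13 of
   the FPU status word. */
static inline int fsw_get_top_sketch(uint32_t fsw)
{
    return (fsw >> 11) & 7;
}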
3709 uint32_t helper_fnstcw(void)
3714 static void update_fp_status(void)
3718 /* set rounding mode */
3719 switch(env
->fpuc
& RC_MASK
) {
3722 rnd_type
= float_round_nearest_even
;
3725 rnd_type
= float_round_down
;
3728 rnd_type
= float_round_up
;
3731 rnd_type
= float_round_to_zero
;
3734 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3736 switch((env
->fpuc
>> 8) & 3) {
3748 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3752 void helper_fldcw(uint32_t val
)
3758 void helper_fclex(void)
3760 env
->fpus
&= 0x7f00;
3763 void helper_fwait(void)
3765 if (env
->fpus
& FPUS_SE
)
3766 fpu_raise_exception();
3769 void helper_fninit(void)
3786 void helper_fbld_ST0(target_ulong ptr
)
3794 for(i
= 8; i
>= 0; i
--) {
3796 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3799 if (ldub(ptr
+ 9) & 0x80)
3805 void helper_fbst_ST0(target_ulong ptr
)
3808 target_ulong mem_ref
, mem_end
;
3811 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3813 mem_end
= mem_ref
+ 9;
3820 while (mem_ref
< mem_end
) {
3825 v
= ((v
/ 10) << 4) | (v
% 10);
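        /* Packed BCD note: each byte written by the loop above holds two
           decimal digits, tens digit in the high nibble, e.g. 73 is stored
           as ((73 / 10) << 4) | (73 % 10) = 0x73. */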
3828 while (mem_ref
< mem_end
) {
3833 void helper_f2xm1(void)
3835 ST0
= pow(2.0,ST0
) - 1.0;
3838 void helper_fyl2x(void)
3840 CPU86_LDouble fptemp
;
3844 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3848 env
->fpus
&= (~0x4700);
3853 void helper_fptan(void)
3855 CPU86_LDouble fptemp
;
3858 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3864 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3865 /* the above code is for |arg| < 2**52 only */
3869 void helper_fpatan(void)
3871 CPU86_LDouble fptemp
, fpsrcop
;
3875 ST1
= atan2(fpsrcop
,fptemp
);
3879 void helper_fxtract(void)
3881 CPU86_LDoubleU temp
;
3882 unsigned int expdif
;
3885 expdif
= EXPD(temp
) - EXPBIAS
;
3886 /*DP exponent bias*/
3893 void helper_fprem1(void)
3895 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3896 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3898 signed long long int q
;
3900 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3901 ST0
= 0.0 / 0.0; /* NaN */
3902 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3908 fpsrcop1
.d
= fpsrcop
;
3910 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3913 /* optimisation? taken from the AMD docs */
3914 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3915 /* ST0 is unchanged */
3920 dblq
= fpsrcop
/ fptemp
;
3921 /* round dblq towards nearest integer */
3923 ST0
= fpsrcop
- fptemp
* dblq
;
3925 /* convert dblq to q by truncating towards zero */
3927 q
= (signed long long int)(-dblq
);
3929 q
= (signed long long int)dblq
;
3931 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3932 /* (C0,C3,C1) <-- (q2,q1,q0) */
3933 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3934 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3935 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3937 env
->fpus
|= 0x400; /* C2 <-- 1 */
3938 fptemp
= pow(2.0, expdif
- 50);
3939 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3940 /* fpsrcop = integer obtained by chopping */
3941 fpsrcop
= (fpsrcop
< 0.0) ?
3942 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3943 ST0
-= (ST1
* fpsrcop
* fptemp
);
3947 void helper_fprem(void)
3949 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3950 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3952 signed long long int q
;
3954 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3955 ST0
= 0.0 / 0.0; /* NaN */
3956 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3960 fpsrcop
= (CPU86_LDouble
)ST0
;
3961 fptemp
= (CPU86_LDouble
)ST1
;
3962 fpsrcop1
.d
= fpsrcop
;
3964 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3967 /* optimisation? taken from the AMD docs */
3968 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3969 /* ST0 is unchanged */
3973 if ( expdif
< 53 ) {
3974 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
3975 /* round dblq towards zero */
3976 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
3977 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
3979 /* convert dblq to q by truncating towards zero */
3981 q
= (signed long long int)(-dblq
);
3983 q
= (signed long long int)dblq
;
3985 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3986 /* (C0,C3,C1) <-- (q2,q1,q0) */
3987 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3988 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3989 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3991 int N
= 32 + (expdif
% 32); /* as per AMD docs */
3992 env
->fpus
|= 0x400; /* C2 <-- 1 */
3993 fptemp
= pow(2.0, (double)(expdif
- N
));
3994 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3995 /* fpsrcop = integer obtained by chopping */
3996 fpsrcop
= (fpsrcop
< 0.0) ?
3997 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3998 ST0
-= (ST1
* fpsrcop
* fptemp
);
4002 void helper_fyl2xp1(void)
4004 CPU86_LDouble fptemp
;
4007 if ((fptemp
+1.0)>0.0) {
4008 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4012 env
->fpus
&= (~0x4700);
4017 void helper_fsqrt(void)
4019 CPU86_LDouble fptemp
;
4023 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4029 void helper_fsincos(void)
4031 CPU86_LDouble fptemp
;
4034 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4040 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4041 /* the above code is for |arg| < 2**63 only */
4045 void helper_frndint(void)
4047 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4050 void helper_fscale(void)
4052 ST0
= ldexp (ST0
, (int)(ST1
));
4055 void helper_fsin(void)
4057 CPU86_LDouble fptemp
;
4060 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4064 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4065 /* the above code is for |arg| < 2**53 only */
4069 void helper_fcos(void)
4071 CPU86_LDouble fptemp
;
4074 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4078 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4079 /* the above code is for |arg5 < 2**63 only */
4083 void helper_fxam_ST0(void)
4085 CPU86_LDoubleU temp
;
4090 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4092 env
->fpus
|= 0x200; /* C1 <-- 1 */
4094 /* XXX: test fptags too */
4095 expdif
= EXPD(temp
);
4096 if (expdif
== MAXEXPD
) {
4097 #ifdef USE_X86LDOUBLE
4098 if (MANTD(temp
) == 0x8000000000000000ULL
)
4100 if (MANTD(temp
) == 0)
4102 env
->fpus
|= 0x500 /*Infinity*/;
4104 env
->fpus
|= 0x100 /*NaN*/;
4105 } else if (expdif
== 0) {
4106 if (MANTD(temp
) == 0)
4107 env
->fpus
|= 0x4000 /*Zero*/;
4109 env
->fpus
|= 0x4400 /*Denormal*/;
4115 void helper_fstenv(target_ulong ptr
, int data32
)
4117 int fpus
, fptag
, exp
, i
;
4121 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4123 for (i
=7; i
>=0; i
--) {
4125 if (env
->fptags
[i
]) {
4128 tmp
.d
= env
->fpregs
[i
].d
;
4131 if (exp
== 0 && mant
== 0) {
4134 } else if (exp
== 0 || exp
== MAXEXPD
4135 #ifdef USE_X86LDOUBLE
4136 || (mant
& (1LL << 63)) == 0
4139 /* NaNs, infinity, denormal */
4146 stl(ptr
, env
->fpuc
);
4148 stl(ptr
+ 8, fptag
);
4149 stl(ptr
+ 12, 0); /* fpip */
4150 stl(ptr
+ 16, 0); /* fpcs */
4151 stl(ptr
+ 20, 0); /* fpoo */
4152 stl(ptr
+ 24, 0); /* fpos */
4155 stw(ptr
, env
->fpuc
);
4157 stw(ptr
+ 4, fptag
);
4165 void helper_fldenv(target_ulong ptr
, int data32
)
4170 env
->fpuc
= lduw(ptr
);
4171 fpus
= lduw(ptr
+ 4);
4172 fptag
= lduw(ptr
+ 8);
4175 env
->fpuc
= lduw(ptr
);
4176 fpus
= lduw(ptr
+ 2);
4177 fptag
= lduw(ptr
+ 4);
4179 env
->fpstt
= (fpus
>> 11) & 7;
4180 env
->fpus
= fpus
& ~0x3800;
4181 for(i
= 0;i
< 8; i
++) {
4182 env
->fptags
[i
] = ((fptag
& 3) == 3);
4187 void helper_fsave(target_ulong ptr
, int data32
)
4192 helper_fstenv(ptr
, data32
);
4194 ptr
+= (14 << data32
);
4195 for(i
= 0;i
< 8; i
++) {
4197 helper_fstt(tmp
, ptr
);
4215 void helper_frstor(target_ulong ptr
, int data32
)
4220 helper_fldenv(ptr
, data32
);
4221 ptr
+= (14 << data32
);
4223 for(i
= 0;i
< 8; i
++) {
4224 tmp
= helper_fldt(ptr
);
4230 void helper_fxsave(target_ulong ptr
, int data64
)
4232 int fpus
, fptag
, i
, nb_xmm_regs
;
4236 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4238 for(i
= 0; i
< 8; i
++) {
4239 fptag
|= (env
->fptags
[i
] << i
);
4241 stw(ptr
, env
->fpuc
);
4243 stw(ptr
+ 4, fptag
^ 0xff);
4244 #ifdef TARGET_X86_64
4246 stq(ptr
+ 0x08, 0); /* rip */
4247 stq(ptr
+ 0x10, 0); /* rdp */
4251 stl(ptr
+ 0x08, 0); /* eip */
4252 stl(ptr
+ 0x0c, 0); /* sel */
4253 stl(ptr
+ 0x10, 0); /* dp */
4254 stl(ptr
+ 0x14, 0); /* sel */
4258 for(i
= 0;i
< 8; i
++) {
4260 helper_fstt(tmp
, addr
);
4264 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4265 /* XXX: finish it */
4266 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4267 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4268 if (env
->hflags
& HF_CS64_MASK
)
4273 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4274 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4275 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4281 void helper_fxrstor(target_ulong ptr
, int data64
)
4283 int i
, fpus
, fptag
, nb_xmm_regs
;
4287 env
->fpuc
= lduw(ptr
);
4288 fpus
= lduw(ptr
+ 2);
4289 fptag
= lduw(ptr
+ 4);
4290 env
->fpstt
= (fpus
>> 11) & 7;
4291 env
->fpus
= fpus
& ~0x3800;
4293 for(i
= 0;i
< 8; i
++) {
4294 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4298 for(i
= 0;i
< 8; i
++) {
4299 tmp
= helper_fldt(addr
);
4304 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4305 /* XXX: finish it */
4306 env
->mxcsr
= ldl(ptr
+ 0x18);
4308 if (env
->hflags
& HF_CS64_MASK
)
4313 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4314 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4315 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4321 #ifndef USE_X86LDOUBLE
4323 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4325 CPU86_LDoubleU temp
;
4330 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4331 /* exponent + sign */
4332 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4333 e
|= SIGND(temp
) >> 16;
4337 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4339 CPU86_LDoubleU temp
;
4343 /* XXX: handle overflow ? */
4344 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4345 e
|= (upper
>> 4) & 0x800; /* sign */
4346 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4348 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4351 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4358 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4360 CPU86_LDoubleU temp
;
4363 *pmant
= temp
.l
.lower
;
4364 *pexp
= temp
.l
.upper
;
4367 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4369 CPU86_LDoubleU temp
;
4371 temp
.l
.upper
= upper
;
4372 temp
.l
.lower
= mant
;
4377 #ifdef TARGET_X86_64
4379 //#define DEBUG_MULDIV
4381 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4390 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4394 add128(plow
, phigh
, 1, 0);
4397 /* return TRUE if overflow */
4398 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4400 uint64_t q
, r
, a1
, a0
;
4413 /* XXX: use a better algorithm */
4414 for(i
= 0; i
< 64; i
++) {
4416 a1
= (a1
<< 1) | (a0
>> 63);
4417 if (ab
|| a1
>= b
) {
4423 a0
= (a0
<< 1) | qb
;
4425 #if defined(DEBUG_MULDIV)
4426 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4427 *phigh
, *plow
, b
, a0
, a1
);
4435 /* return TRUE if overflow */
4436 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4439 sa
= ((int64_t)*phigh
< 0);
4441 neg128(plow
, phigh
);
4445 if (div64(plow
, phigh
, b
) != 0)
4448 if (*plow
> (1ULL << 63))
4452 if (*plow
>= (1ULL << 63))
4460 void helper_mulq_EAX_T0(target_ulong t0
)
4464 mulu64(&r0
, &r1
, EAX
, t0
);
4471 void helper_imulq_EAX_T0(target_ulong t0
)
4475 muls64(&r0
, &r1
, EAX
, t0
);
4479 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4482 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4486 muls64(&r0
, &r1
, t0
, t1
);
4488 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4492 void helper_divq_EAX(target_ulong t0
)
4496 raise_exception(EXCP00_DIVZ
);
4500 if (div64(&r0
, &r1
, t0
))
4501 raise_exception(EXCP00_DIVZ
);
4506 void helper_idivq_EAX(target_ulong t0
)
4510 raise_exception(EXCP00_DIVZ
);
4514 if (idiv64(&r0
, &r1
, t0
))
4515 raise_exception(EXCP00_DIVZ
);
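
/* Note on div64()/idiv64() and their callers above: x86 DIV/IDIV fault not
   only on a zero divisor but also when the quotient does not fit in the
   destination register, so both helpers return an overflow flag that
   helper_divq_EAX()/helper_idivq_EAX() turn into EXCP00_DIVZ; for the signed
   case the quotient must also fit in [-2^63, 2^63 - 1], hence the extra
   comparisons against 1ULL << 63 after the unsigned division. */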
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}
4537 void helper_monitor(target_ulong ptr
)
4539 if ((uint32_t)ECX
!= 0)
4540 raise_exception(EXCP0D_GPF
);
4541 /* XXX: store address ? */
4542 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4545 void helper_mwait(int next_eip_addend
)
4547 if ((uint32_t)ECX
!= 0)
4548 raise_exception(EXCP0D_GPF
);
4549 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4550 EIP
+= next_eip_addend
;
4552 /* XXX: not complete but not completely erroneous */
4553 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4554 /* more than one CPU: do not sleep because another CPU may
4561 void helper_debug(void)
4563 env
->exception_index
= EXCP_DEBUG
;
4567 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4569 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4572 void helper_raise_exception(int exception_index
)
4574 raise_exception(exception_index
);
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}
4644 #if !defined(CONFIG_USER_ONLY)
4646 #define MMUSUFFIX _mmu
4649 #include "softmmu_template.h"
4652 #include "softmmu_template.h"
4655 #include "softmmu_template.h"
4658 #include "softmmu_template.h"
4662 #if !defined(CONFIG_USER_ONLY)
4663 /* try to fill the TLB and return an exception if error. If retaddr is
4664 NULL, it means that the function was called in C code (i.e. not
4665 from generated code or from helper.c) */
4666 /* XXX: fix it to restore all registers */
4667 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4669 TranslationBlock
*tb
;
4672 CPUX86State
*saved_env
;
4674 /* XXX: hack to restore env in all cases, even if not called from
4677 env
= cpu_single_env
;
4679 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4682 /* now we have a real cpu fault */
4683 pc
= (unsigned long)retaddr
;
4684 tb
= tb_find_pc(pc
);
4686 /* the PC is inside the translated code. It means that we have
4687 a virtual CPU fault */
4688 cpu_restore_state(tb
, env
, pc
, NULL
);
4691 raise_exception_err(env
->exception_index
, env
->error_code
);
4697 /* Secure Virtual Machine helpers */
4699 #if defined(CONFIG_USER_ONLY)
4701 void helper_vmrun(int aflag
, int next_eip_addend
)
4704 void helper_vmmcall(void)
4707 void helper_vmload(int aflag
)
4710 void helper_vmsave(int aflag
)
4713 void helper_stgi(void)
4716 void helper_clgi(void)
4719 void helper_skinit(void)
4722 void helper_invlpga(int aflag
)
4725 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4728 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4732 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4733 uint32_t next_eip_addend
)
4738 static inline void svm_save_seg(target_phys_addr_t addr
,
4739 const SegmentCache
*sc
)
4741 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4743 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4745 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4747 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4748 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
4751 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4755 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4756 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4757 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4758 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4759 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4762 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4763 CPUState
*env
, int seg_reg
)
4765 SegmentCache sc1
, *sc
= &sc1
;
4766 svm_load_seg(addr
, sc
);
4767 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4768 sc
->base
, sc
->limit
, sc
->flags
);
4771 void helper_vmrun(int aflag
, int next_eip_addend
)
4777 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4782 addr
= (uint32_t)EAX
;
4784 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4785 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
4787 env
->vm_vmcb
= addr
;
4789 /* save the current CPU state in the hsave page */
4790 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4791 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4793 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4794 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4796 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4797 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4798 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4799 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4800 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4801 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4803 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4804 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4806 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4808 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4810 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4812 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4815 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4816 EIP
+ next_eip_addend
);
4817 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4818 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4820 /* load the interception bitmaps so we do not need to access the
4822 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4823 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4824 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4825 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4826 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4827 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4829 /* enable intercepts */
4830 env
->hflags
|= HF_SVMI_MASK
;
4832 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4834 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4835 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4837 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4838 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4840 /* clear exit_info_2 so we behave like the real hardware */
4841 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4843 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4844 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4845 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4846 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4847 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4848 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4849 if (int_ctl
& V_INTR_MASKING_MASK
) {
4850 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4851 env
->hflags2
|= HF2_VINTR_MASK
;
4852 if (env
->eflags
& IF_MASK
)
4853 env
->hflags2
|= HF2_HIF_MASK
;
4857 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4859 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4860 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4861 CC_OP
= CC_OP_EFLAGS
;
4863 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4865 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4867 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4869 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4872 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4874 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4875 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4876 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4877 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4878 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4880 /* FIXME: guest state consistency checks */
4882 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4883 case TLB_CONTROL_DO_NOTHING
:
4885 case TLB_CONTROL_FLUSH_ALL_ASID
:
4886 /* FIXME: this is not 100% correct but should work for now */
4891 env
->hflags2
|= HF2_GIF_MASK
;
4893 if (int_ctl
& V_IRQ_MASK
) {
4894 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4897 /* maybe we need to inject an event */
4898 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4899 if (event_inj
& SVM_EVTINJ_VALID
) {
4900 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4901 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4902 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4903 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4905 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4906 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4907 /* FIXME: need to implement valid_err */
4908 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4909 case SVM_EVTINJ_TYPE_INTR
:
4910 env
->exception_index
= vector
;
4911 env
->error_code
= event_inj_err
;
4912 env
->exception_is_int
= 0;
4913 env
->exception_next_eip
= -1;
4914 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4915 fprintf(logfile
, "INTR");
4916 /* XXX: is it always correct ? */
4917 do_interrupt(vector
, 0, 0, 0, 1);
4919 case SVM_EVTINJ_TYPE_NMI
:
4920 env
->exception_index
= EXCP02_NMI
;
4921 env
->error_code
= event_inj_err
;
4922 env
->exception_is_int
= 0;
4923 env
->exception_next_eip
= EIP
;
4924 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4925 fprintf(logfile
, "NMI");
4928 case SVM_EVTINJ_TYPE_EXEPT
:
4929 env
->exception_index
= vector
;
4930 env
->error_code
= event_inj_err
;
4931 env
->exception_is_int
= 0;
4932 env
->exception_next_eip
= -1;
4933 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4934 fprintf(logfile
, "EXEPT");
4937 case SVM_EVTINJ_TYPE_SOFT
:
4938 env
->exception_index
= vector
;
4939 env
->error_code
= event_inj_err
;
4940 env
->exception_is_int
= 1;
4941 env
->exception_next_eip
= EIP
;
4942 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4943 fprintf(logfile
, "SOFT");
4947 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4948 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4952 void helper_vmmcall(void)
4954 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
4955 raise_exception(EXCP06_ILLOP
);
4958 void helper_vmload(int aflag
)
4961 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
4966 addr
= (uint32_t)EAX
;
4968 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4969 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4970 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4971 env
->segs
[R_FS
].base
);
4973 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
4975 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
4977 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
4979 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
4982 #ifdef TARGET_X86_64
4983 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
4984 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
4985 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
4986 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
4988 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
4989 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
4990 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
4991 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
4994 void helper_vmsave(int aflag
)
4997 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5002 addr
= (uint32_t)EAX
;
5004 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5005 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5006 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5007 env
->segs
[R_FS
].base
);
5009 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5011 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5013 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5015 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5018 #ifdef TARGET_X86_64
5019 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5020 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5021 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5022 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5024 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5025 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5026 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5027 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5030 void helper_stgi(void)
5032 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5033 env
->hflags2
|= HF2_GIF_MASK
;
5036 void helper_clgi(void)
5038 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5039 env
->hflags2
&= ~HF2_GIF_MASK
;
5042 void helper_skinit(void)
5044 helper_svm_check_intercept_param(SVM_EXIT_SKINIT
, 0);
5045 /* XXX: not implemented */
5046 raise_exception(EXCP06_ILLOP
);
5049 void helper_invlpga(int aflag
)
5052 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA
, 0);
5057 addr
= (uint32_t)EAX
;
5059 /* XXX: could use the ASID to see if it is needed to do the
5061 tlb_flush_page(env
, addr
);
5064 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5066 if (likely(!(env
->hflags
& HF_SVMI_MASK
)))
5069 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5070 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
5071 helper_vmexit(type
, param
);
5074 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5075 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
5076 helper_vmexit(type
, param
);
5079 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
5080 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
5081 helper_vmexit(type
, param
);
5084 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
5085 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
5086 helper_vmexit(type
, param
);
5089 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
5090 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
5091 helper_vmexit(type
, param
);
5095 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
5096 /* FIXME: this should be read in at vmrun (faster this way?) */
5097 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5099 switch((uint32_t)ECX
) {
5104 case 0xc0000000 ... 0xc0001fff:
5105 t0
= (8192 + ECX
- 0xc0000000) * 2;
5109 case 0xc0010000 ... 0xc0011fff:
5110 t0
= (16384 + ECX
- 0xc0010000) * 2;
5115 helper_vmexit(type
, param
);
5120 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5121 helper_vmexit(type
, param
);
5125 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
5126 helper_vmexit(type
, param
);
5132 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5133 uint32_t next_eip_addend
)
5135 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
5136 /* FIXME: this should be read in at vmrun (faster this way?) */
5137 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5138 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5139 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5141 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5142 env
->eip
+ next_eip_addend
);
5143 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
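            /* IOPM lookup note: the I/O permission bitmap has one bit per
               port; lduw_phys(addr + port / 8) fetches the 16 bits covering
               the port, and 'mask' extends the test over the access size
               encoded in param, so the intercept fires if any byte of the
               access is covered. */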
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave +
                                     offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
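/* MMX/SSE helpers.  MMX instructions alias the x87 register file, so
   entering MMX mode marks all tags as valid (fptags = 0) while EMMS marks
   them empty again (fptags = 1 per register). */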
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
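/* ops_sse.h and helper_template.h are template headers: they are included
   several times with different SHIFT values to instantiate the MMX (SHIFT 0)
   and SSE (SHIFT 1) vector helpers and the byte/word/long/quad variants of
   the flag computation helpers. */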
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
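/* The bit scan helpers below assume a non-zero operand; the translated code
   is expected to test for zero (the ZF=1 case) before calling them, since a
   zero input would never terminate the scan loop. */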
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
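/* Lazy condition code evaluation: CC_OP records which operation last set
   the flags, and the compute_all_*/compute_c_* functions instantiated from
   helper_template.h above rebuild the full EFLAGS (or just CF) from
   CC_SRC/CC_DST on demand. */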
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();