/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
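/* Note: the x86 PF (parity) flag is set when the low byte of a result
   contains an even number of 1 bits.  The table below is indexed by that
   byte and yields either CC_P or 0, so the flag can be recovered with a
   single lookup instead of recomputing the bit count. */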
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
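/* Note: e1/e2 are the two 32-bit words of a segment descriptor.  The 32-bit
   base is scattered over e1[31:16], e2[7:0] and e2[31:24]; the 20-bit limit
   sits in e1[15:0] and e2[19:16] and is scaled to 4K pages when the
   granularity bit (DESC_G_MASK) is set, which is what the two helpers above
   undo to recover flat base and limit values. */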
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
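/* Note: hardware task switching saves the outgoing context into the current
   TSS, loads the incoming TSS, and updates the TSS busy bits and the NT flag
   differently depending on whether the switch was caused by a JMP, a
   CALL/interrupt or an IRET; that is what the source argument below selects. */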
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
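/* Note: the I/O permission bitmap of a 32-bit TSS starts at the 16-bit offset
   stored at TSS+0x66, with one bit per I/O port.  An access of "size" bytes is
   allowed only if all the corresponding bits are clear; reading a 16-bit word
   at the byte offset is enough because an access spans at most 4 bits plus a
   7-bit misalignment. */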
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
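/* Note: the PUSHW/PUSHL/POPW/POPL macros work on a local copy of the stack
   pointer (sp) relative to the stack segment base (ssp), masking it with
   sp_mask derived from the SS descriptor's B bit, so 16-bit and 32-bit stacks
   behave the same; callers only commit the final value with SET_ESP once the
   whole frame has been built. */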
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1304 #if defined(CONFIG_USER_ONLY)
1306 void do_smm_enter(void)
1310 void helper_rsm(void)
1316 #ifdef TARGET_X86_64
1317 #define SMM_REVISION_ID 0x00020064
1319 #define SMM_REVISION_ID 0x00020000
1322 void do_smm_enter(void)
1324 target_ulong sm_state
;
1328 if (loglevel
& CPU_LOG_INT
) {
1329 fprintf(logfile
, "SMM: enter\n");
1330 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1333 env
->hflags
|= HF_SMM_MASK
;
1334 cpu_smm_update(env
);
1336 sm_state
= env
->smbase
+ 0x8000;
1338 #ifdef TARGET_X86_64
1339 for(i
= 0; i
< 6; i
++) {
1341 offset
= 0x7e00 + i
* 16;
1342 stw_phys(sm_state
+ offset
, dt
->selector
);
1343 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1344 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1345 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1348 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1349 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1351 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1352 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1353 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1354 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1356 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1357 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1359 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1360 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1361 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1362 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1364 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1366 stq_phys(sm_state
+ 0x7ff8, EAX
);
1367 stq_phys(sm_state
+ 0x7ff0, ECX
);
1368 stq_phys(sm_state
+ 0x7fe8, EDX
);
1369 stq_phys(sm_state
+ 0x7fe0, EBX
);
1370 stq_phys(sm_state
+ 0x7fd8, ESP
);
1371 stq_phys(sm_state
+ 0x7fd0, EBP
);
1372 stq_phys(sm_state
+ 0x7fc8, ESI
);
1373 stq_phys(sm_state
+ 0x7fc0, EDI
);
1374 for(i
= 8; i
< 16; i
++)
1375 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1376 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1377 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1378 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1379 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1381 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1382 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1383 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1385 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1386 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1388 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1389 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1390 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1391 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1392 stl_phys(sm_state
+ 0x7fec, EDI
);
1393 stl_phys(sm_state
+ 0x7fe8, ESI
);
1394 stl_phys(sm_state
+ 0x7fe4, EBP
);
1395 stl_phys(sm_state
+ 0x7fe0, ESP
);
1396 stl_phys(sm_state
+ 0x7fdc, EBX
);
1397 stl_phys(sm_state
+ 0x7fd8, EDX
);
1398 stl_phys(sm_state
+ 0x7fd4, ECX
);
1399 stl_phys(sm_state
+ 0x7fd0, EAX
);
1400 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1401 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1403 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1404 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1405 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1406 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1408 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1409 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1410 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1411 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1413 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1414 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1416 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1417 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1419 for(i
= 0; i
< 6; i
++) {
1422 offset
= 0x7f84 + i
* 12;
1424 offset
= 0x7f2c + (i
- 3) * 12;
1425 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1426 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1427 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1428 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1430 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1432 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1433 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1435 /* init SMM cpu state */
1437 #ifdef TARGET_X86_64
1438 cpu_load_efer(env
, 0);
1440 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1441 env
->eip
= 0x00008000;
1442 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1444 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1445 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1446 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1447 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1448 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1450 cpu_x86_update_cr0(env
,
1451 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1452 cpu_x86_update_cr4(env
, 0);
1453 env
->dr
[7] = 0x00000400;
1454 CC_OP
= CC_OP_EFLAGS
;
1457 void helper_rsm(void)
1459 target_ulong sm_state
;
1463 sm_state
= env
->smbase
+ 0x8000;
1464 #ifdef TARGET_X86_64
1465 cpu_load_efer(env
, ldq_phys(sm_state
+ 0x7ed0));
1467 for(i
= 0; i
< 6; i
++) {
1468 offset
= 0x7e00 + i
* 16;
1469 cpu_x86_load_seg_cache(env
, i
,
1470 lduw_phys(sm_state
+ offset
),
1471 ldq_phys(sm_state
+ offset
+ 8),
1472 ldl_phys(sm_state
+ offset
+ 4),
1473 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1476 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1477 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1479 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1480 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1481 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1482 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1484 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1485 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1487 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1488 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1489 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1490 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1492 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1493 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1494 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1495 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1496 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1497 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1498 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1499 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1500 for(i
= 8; i
< 16; i
++)
1501 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1502 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1503 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1504 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1505 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1506 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1508 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1509 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1510 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1512 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1513 if (val
& 0x20000) {
1514 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1517 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1518 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1519 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1520 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1521 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1522 EDI
= ldl_phys(sm_state
+ 0x7fec);
1523 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1524 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1525 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1526 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1527 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1528 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1529 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1530 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1531 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1533 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1534 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1535 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1536 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1538 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1539 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1540 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1541 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1543 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1544 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1546 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1547 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1549 for(i
= 0; i
< 6; i
++) {
1551 offset
= 0x7f84 + i
* 12;
1553 offset
= 0x7f2c + (i
- 3) * 12;
1554 cpu_x86_load_seg_cache(env
, i
,
1555 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1556 ldl_phys(sm_state
+ offset
+ 8),
1557 ldl_phys(sm_state
+ offset
+ 4),
1558 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1560 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1562 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1563 if (val
& 0x20000) {
1564 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1567 CC_OP
= CC_OP_EFLAGS
;
1568 env
->hflags
&= ~HF_SMM_MASK
;
1569 cpu_smm_update(env
);
1571 if (loglevel
& CPU_LOG_INT
) {
1572 fprintf(logfile
, "SMM: after RSM\n");
1573 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1577 #endif /* !CONFIG_USER_ONLY */
1580 /* division, flags are undefined */
1582 void helper_divb_AL(target_ulong t0
)
1584 unsigned int num
, den
, q
, r
;
1586 num
= (EAX
& 0xffff);
1589 raise_exception(EXCP00_DIVZ
);
1593 raise_exception(EXCP00_DIVZ
);
1595 r
= (num
% den
) & 0xff;
1596 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1599 void helper_idivb_AL(target_ulong t0
)
1606 raise_exception(EXCP00_DIVZ
);
1610 raise_exception(EXCP00_DIVZ
);
1612 r
= (num
% den
) & 0xff;
1613 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1616 void helper_divw_AX(target_ulong t0
)
1618 unsigned int num
, den
, q
, r
;
1620 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1621 den
= (t0
& 0xffff);
1623 raise_exception(EXCP00_DIVZ
);
1627 raise_exception(EXCP00_DIVZ
);
1629 r
= (num
% den
) & 0xffff;
1630 EAX
= (EAX
& ~0xffff) | q
;
1631 EDX
= (EDX
& ~0xffff) | r
;
1634 void helper_idivw_AX(target_ulong t0
)
1638 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1641 raise_exception(EXCP00_DIVZ
);
1644 if (q
!= (int16_t)q
)
1645 raise_exception(EXCP00_DIVZ
);
1647 r
= (num
% den
) & 0xffff;
1648 EAX
= (EAX
& ~0xffff) | q
;
1649 EDX
= (EDX
& ~0xffff) | r
;
1652 void helper_divl_EAX(target_ulong t0
)
1654 unsigned int den
, r
;
1657 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1660 raise_exception(EXCP00_DIVZ
);
1665 raise_exception(EXCP00_DIVZ
);
1670 void helper_idivl_EAX(target_ulong t0
)
1675 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1678 raise_exception(EXCP00_DIVZ
);
1682 if (q
!= (int32_t)q
)
1683 raise_exception(EXCP00_DIVZ
);
1690 /* XXX: exception */
1691 void helper_aam(int base
)
1697 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1701 void helper_aad(int base
)
1705 ah
= (EAX
>> 8) & 0xff;
1706 al
= ((ah
* base
) + al
) & 0xff;
1707 EAX
= (EAX
& ~0xffff) | al
;
1711 void helper_aaa(void)
1717 eflags
= cc_table
[CC_OP
].compute_all();
1720 ah
= (EAX
>> 8) & 0xff;
1722 icarry
= (al
> 0xf9);
1723 if (((al
& 0x0f) > 9 ) || af
) {
1724 al
= (al
+ 6) & 0x0f;
1725 ah
= (ah
+ 1 + icarry
) & 0xff;
1726 eflags
|= CC_C
| CC_A
;
1728 eflags
&= ~(CC_C
| CC_A
);
1731 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1736 void helper_aas(void)
1742 eflags
= cc_table
[CC_OP
].compute_all();
1745 ah
= (EAX
>> 8) & 0xff;
1748 if (((al
& 0x0f) > 9 ) || af
) {
1749 al
= (al
- 6) & 0x0f;
1750 ah
= (ah
- 1 - icarry
) & 0xff;
1751 eflags
|= CC_C
| CC_A
;
1753 eflags
&= ~(CC_C
| CC_A
);
1756 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1761 void helper_daa(void)
1766 eflags
= cc_table
[CC_OP
].compute_all();
1772 if (((al
& 0x0f) > 9 ) || af
) {
1773 al
= (al
+ 6) & 0xff;
1776 if ((al
> 0x9f) || cf
) {
1777 al
= (al
+ 0x60) & 0xff;
1780 EAX
= (EAX
& ~0xff) | al
;
1781 /* well, speed is not an issue here, so we compute the flags by hand */
1782 eflags
|= (al
== 0) << 6; /* zf */
1783 eflags
|= parity_table
[al
]; /* pf */
1784 eflags
|= (al
& 0x80); /* sf */
1789 void helper_das(void)
1791 int al
, al1
, af
, cf
;
1794 eflags
= cc_table
[CC_OP
].compute_all();
1801 if (((al
& 0x0f) > 9 ) || af
) {
1805 al
= (al
- 6) & 0xff;
1807 if ((al1
> 0x99) || cf
) {
1808 al
= (al
- 0x60) & 0xff;
1811 EAX
= (EAX
& ~0xff) | al
;
1812 /* well, speed is not an issue here, so we compute the flags by hand */
1813 eflags
|= (al
== 0) << 6; /* zf */
1814 eflags
|= parity_table
[al
]; /* pf */
1815 eflags
|= (al
& 0x80); /* sf */
1820 void helper_into(int next_eip_addend
)
1823 eflags
= cc_table
[CC_OP
].compute_all();
1824 if (eflags
& CC_O
) {
1825 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1829 void helper_cmpxchg8b(target_ulong a0
)
1834 eflags
= cc_table
[CC_OP
].compute_all();
1836 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1837 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1840 EDX
= (uint32_t)(d
>> 32);
1847 #ifdef TARGET_X86_64
1848 void helper_cmpxchg16b(target_ulong a0
)
1853 eflags
= cc_table
[CC_OP
].compute_all();
1856 if (d0
== EAX
&& d1
== EDX
) {
1869 void helper_single_step(void)
1871 env
->dr
[6] |= 0x4000;
1872 raise_exception(EXCP01_SSTP
);
1875 void helper_cpuid(void)
1879 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1881 index
= (uint32_t)EAX
;
1882 /* test if maximum index reached */
1883 if (index
& 0x80000000) {
1884 if (index
> env
->cpuid_xlevel
)
1885 index
= env
->cpuid_level
;
1887 if (index
> env
->cpuid_level
)
1888 index
= env
->cpuid_level
;
1893 EAX
= env
->cpuid_level
;
1894 EBX
= env
->cpuid_vendor1
;
1895 EDX
= env
->cpuid_vendor2
;
1896 ECX
= env
->cpuid_vendor3
;
1899 EAX
= env
->cpuid_version
;
1900 EBX
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1901 ECX
= env
->cpuid_ext_features
;
1902 EDX
= env
->cpuid_features
;
1905 /* cache info: needed for Pentium Pro compatibility */
1912 EAX
= env
->cpuid_xlevel
;
1913 EBX
= env
->cpuid_vendor1
;
1914 EDX
= env
->cpuid_vendor2
;
1915 ECX
= env
->cpuid_vendor3
;
1918 EAX
= env
->cpuid_features
;
1920 ECX
= env
->cpuid_ext3_features
;
1921 EDX
= env
->cpuid_ext2_features
;
1926 EAX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1927 EBX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1928 ECX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1929 EDX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1932 /* cache info (L1 cache) */
1939 /* cache info (L2 cache) */
1946 /* virtual & phys address size in low 2 bytes. */
1947 /* XXX: This value must match the one used in the MMU code. */
1948 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
) {
1949 /* 64 bit processor */
1950 #if defined(USE_KQEMU)
1951 EAX
= 0x00003020; /* 48 bits virtual, 32 bits physical */
1953 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1954 EAX
= 0x00003028; /* 48 bits virtual, 40 bits physical */
1957 #if defined(USE_KQEMU)
1958 EAX
= 0x00000020; /* 32 bits physical */
1960 EAX
= 0x00000024; /* 36 bits physical */
1974 /* reserved values: zero */
1983 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1986 uint32_t esp_mask
, esp
, ebp
;
1988 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1989 ssp
= env
->segs
[R_SS
].base
;
1998 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
2001 stl(ssp
+ (esp
& esp_mask
), t1
);
2008 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
2011 stw(ssp
+ (esp
& esp_mask
), t1
);
2015 #ifdef TARGET_X86_64
2016 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
2018 target_ulong esp
, ebp
;
2038 stw(esp
, lduw(ebp
));
2046 void helper_lldt(int selector
)
2050 int index
, entry_limit
;
2054 if ((selector
& 0xfffc) == 0) {
2055 /* XXX: NULL selector case: invalid LDT */
2060 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2062 index
= selector
& ~7;
2063 #ifdef TARGET_X86_64
2064 if (env
->hflags
& HF_LMA_MASK
)
2069 if ((index
+ entry_limit
) > dt
->limit
)
2070 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2071 ptr
= dt
->base
+ index
;
2072 e1
= ldl_kernel(ptr
);
2073 e2
= ldl_kernel(ptr
+ 4);
2074 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2075 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2076 if (!(e2
& DESC_P_MASK
))
2077 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2078 #ifdef TARGET_X86_64
2079 if (env
->hflags
& HF_LMA_MASK
) {
2081 e3
= ldl_kernel(ptr
+ 8);
2082 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2083 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2087 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2090 env
->ldt
.selector
= selector
;
2093 void helper_ltr(int selector
)
2097 int index
, type
, entry_limit
;
2101 if ((selector
& 0xfffc) == 0) {
2102 /* NULL selector case: invalid TR */
2108 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2110 index
= selector
& ~7;
2111 #ifdef TARGET_X86_64
2112 if (env
->hflags
& HF_LMA_MASK
)
2117 if ((index
+ entry_limit
) > dt
->limit
)
2118 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2119 ptr
= dt
->base
+ index
;
2120 e1
= ldl_kernel(ptr
);
2121 e2
= ldl_kernel(ptr
+ 4);
2122 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2123 if ((e2
& DESC_S_MASK
) ||
2124 (type
!= 1 && type
!= 9))
2125 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2126 if (!(e2
& DESC_P_MASK
))
2127 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2128 #ifdef TARGET_X86_64
2129 if (env
->hflags
& HF_LMA_MASK
) {
2131 e3
= ldl_kernel(ptr
+ 8);
2132 e4
= ldl_kernel(ptr
+ 12);
2133 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2134 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2135 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2136 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2140 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2142 e2
|= DESC_TSS_BUSY_MASK
;
2143 stl_kernel(ptr
+ 4, e2
);
2145 env
->tr
.selector
= selector
;
2148 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2149 void helper_load_seg(int seg_reg
, int selector
)
2158 cpl
= env
->hflags
& HF_CPL_MASK
;
2159 if ((selector
& 0xfffc) == 0) {
2160 /* null selector case */
2162 #ifdef TARGET_X86_64
2163 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2166 raise_exception_err(EXCP0D_GPF
, 0);
2167 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2174 index
= selector
& ~7;
2175 if ((index
+ 7) > dt
->limit
)
2176 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2177 ptr
= dt
->base
+ index
;
2178 e1
= ldl_kernel(ptr
);
2179 e2
= ldl_kernel(ptr
+ 4);
2181 if (!(e2
& DESC_S_MASK
))
2182 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2184 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2185 if (seg_reg
== R_SS
) {
2186 /* must be writable segment */
2187 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2188 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2189 if (rpl
!= cpl
|| dpl
!= cpl
)
2190 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2192 /* must be readable segment */
2193 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2194 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2196 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2197 /* if not conforming code, test rights */
2198 if (dpl
< cpl
|| dpl
< rpl
)
2199 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2203 if (!(e2
& DESC_P_MASK
)) {
2204 if (seg_reg
== R_SS
)
2205 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2207 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2210 /* set the access bit if not already set */
2211 if (!(e2
& DESC_A_MASK
)) {
2213 stl_kernel(ptr
+ 4, e2
);
2216 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2217 get_seg_base(e1
, e2
),
2218 get_seg_limit(e1
, e2
),
2221 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2222 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2227 /* protected mode jump */
2228 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2229 int next_eip_addend
)
2232 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2233 target_ulong next_eip
;
2235 if ((new_cs
& 0xfffc) == 0)
2236 raise_exception_err(EXCP0D_GPF
, 0);
2237 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2238 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2239 cpl
= env
->hflags
& HF_CPL_MASK
;
2240 if (e2
& DESC_S_MASK
) {
2241 if (!(e2
& DESC_CS_MASK
))
2242 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2243 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2244 if (e2
& DESC_C_MASK
) {
2245 /* conforming code segment */
2247 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2249 /* non conforming code segment */
2252 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2254 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2256 if (!(e2
& DESC_P_MASK
))
2257 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2258 limit
= get_seg_limit(e1
, e2
);
2259 if (new_eip
> limit
&&
2260 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2261 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2262 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2263 get_seg_base(e1
, e2
), limit
, e2
);
2266 /* jump to call or task gate */
2267 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2269 cpl
= env
->hflags
& HF_CPL_MASK
;
2270 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2272 case 1: /* 286 TSS */
2273 case 9: /* 386 TSS */
2274 case 5: /* task gate */
2275 if (dpl
< cpl
|| dpl
< rpl
)
2276 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2277 next_eip
= env
->eip
+ next_eip_addend
;
2278 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2279 CC_OP
= CC_OP_EFLAGS
;
2281 case 4: /* 286 call gate */
2282 case 12: /* 386 call gate */
2283 if ((dpl
< cpl
) || (dpl
< rpl
))
2284 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2285 if (!(e2
& DESC_P_MASK
))
2286 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2288 new_eip
= (e1
& 0xffff);
2290 new_eip
|= (e2
& 0xffff0000);
2291 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2292 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2293 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2294 /* must be code segment */
2295 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2296 (DESC_S_MASK
| DESC_CS_MASK
)))
2297 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2298 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2299 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2300 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2301 if (!(e2
& DESC_P_MASK
))
2302 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2303 limit
= get_seg_limit(e1
, e2
);
2304 if (new_eip
> limit
)
2305 raise_exception_err(EXCP0D_GPF
, 0);
2306 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2307 get_seg_base(e1
, e2
), limit
, e2
);
2311 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2317 /* real mode call */
2318 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2319 int shift
, int next_eip
)
2322 uint32_t esp
, esp_mask
;
2327 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2328 ssp
= env
->segs
[R_SS
].base
;
2330 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2331 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2333 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2334 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2337 SET_ESP(esp
, esp_mask
);
2339 env
->segs
[R_CS
].selector
= new_cs
;
2340 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2343 /* protected mode call */
2344 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2345 int shift
, int next_eip_addend
)
2348 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2349 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2350 uint32_t val
, limit
, old_sp_mask
;
2351 target_ulong ssp
, old_ssp
, next_eip
;
2353 next_eip
= env
->eip
+ next_eip_addend
;
2355 if (loglevel
& CPU_LOG_PCALL
) {
2356 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2357 new_cs
, (uint32_t)new_eip
, shift
);
2358 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2361 if ((new_cs
& 0xfffc) == 0)
2362 raise_exception_err(EXCP0D_GPF
, 0);
2363 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2364 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2365 cpl
= env
->hflags
& HF_CPL_MASK
;
2367 if (loglevel
& CPU_LOG_PCALL
) {
2368 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2371 if (e2
& DESC_S_MASK
) {
2372 if (!(e2
& DESC_CS_MASK
))
2373 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2374 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2375 if (e2
& DESC_C_MASK
) {
2376 /* conforming code segment */
2378 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2380 /* non conforming code segment */
2383 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2385 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2387 if (!(e2
& DESC_P_MASK
))
2388 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2390 #ifdef TARGET_X86_64
2391 /* XXX: check 16/32 bit cases in long mode */
2396 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2397 PUSHQ(rsp
, next_eip
);
2398 /* from this point, not restartable */
2400 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2401 get_seg_base(e1
, e2
),
2402 get_seg_limit(e1
, e2
), e2
);
2408 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2409 ssp
= env
->segs
[R_SS
].base
;
2411 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2412 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2414 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2415 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2418 limit
= get_seg_limit(e1
, e2
);
2419 if (new_eip
> limit
)
2420 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2421 /* from this point, not restartable */
2422 SET_ESP(sp
, sp_mask
);
2423 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2424 get_seg_base(e1
, e2
), limit
, e2
);
2428 /* check gate type */
2429 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2430 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2433 case 1: /* available 286 TSS */
2434 case 9: /* available 386 TSS */
2435 case 5: /* task gate */
2436 if (dpl
< cpl
|| dpl
< rpl
)
2437 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2438 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2439 CC_OP
= CC_OP_EFLAGS
;
2441 case 4: /* 286 call gate */
2442 case 12: /* 386 call gate */
2445 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2450 if (dpl
< cpl
|| dpl
< rpl
)
2451 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2452 /* check valid bit */
2453 if (!(e2
& DESC_P_MASK
))
2454 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2455 selector
= e1
>> 16;
2456 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2457 param_count
= e2
& 0x1f;
2458 if ((selector
& 0xfffc) == 0)
2459 raise_exception_err(EXCP0D_GPF
, 0);
2461 if (load_segment(&e1
, &e2
, selector
) != 0)
2462 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2463 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2464 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2465 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2467 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2468 if (!(e2
& DESC_P_MASK
))
2469 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2471 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2472 /* to inner privilege */
2473 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2475 if (loglevel
& CPU_LOG_PCALL
)
2476 fprintf(logfile
, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2477 ss
, sp
, param_count
, ESP
);
2479 if ((ss
& 0xfffc) == 0)
2480 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2481 if ((ss
& 3) != dpl
)
2482 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2483 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2484 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2485 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2487 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2488 if (!(ss_e2
& DESC_S_MASK
) ||
2489 (ss_e2
& DESC_CS_MASK
) ||
2490 !(ss_e2
& DESC_W_MASK
))
2491 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2492 if (!(ss_e2
& DESC_P_MASK
))
2493 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2495 // push_size = ((param_count * 2) + 8) << shift;
2497 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2498 old_ssp
= env
->segs
[R_SS
].base
;
2500 sp_mask
= get_sp_mask(ss_e2
);
2501 ssp
= get_seg_base(ss_e1
, ss_e2
);
2503 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2504 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2505 for(i
= param_count
- 1; i
>= 0; i
--) {
2506 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2507 PUSHL(ssp
, sp
, sp_mask
, val
);
2510 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2511 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2512 for(i
= param_count
- 1; i
>= 0; i
--) {
2513 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2514 PUSHW(ssp
, sp
, sp_mask
, val
);
2519 /* to same privilege */
2521 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2522 ssp
= env
->segs
[R_SS
].base
;
2523 // push_size = (4 << shift);
2528 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2529 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2531 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2532 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2535 /* from this point, not restartable */
2538 ss
= (ss
& ~3) | dpl
;
2539 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2541 get_seg_limit(ss_e1
, ss_e2
),
2545 selector
= (selector
& ~3) | dpl
;
2546 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2547 get_seg_base(e1
, e2
),
2548 get_seg_limit(e1
, e2
),
2550 cpu_x86_set_cpl(env
, dpl
);
2551 SET_ESP(sp
, sp_mask
);
2555 if (kqemu_is_ok(env
)) {
2556 env
->exception_index
= -1;
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
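
/* NOTE: only the flag bits selected by eflags_mask above are writable by
   IRET here; in vm86 mode IOPL is excluded so the guest cannot raise its
   own I/O privilege level, and a 16-bit IRET (shift == 0) restricts the
   update to the low 16 bits of EFLAGS. */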
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
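
/* NOTE: validate_seg() implements the protected mode rule that a data or
   non-conforming code segment whose DPL is numerically lower (more
   privileged) than the new CPL must not survive a return to outer
   privilege; loading an all-zero cache entry is how the segment register
   is nulled here. */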
2619 /* protected mode iret */
2620 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2622 uint32_t new_cs
, new_eflags
, new_ss
;
2623 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2624 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2625 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2626 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2628 #ifdef TARGET_X86_64
2633 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2635 ssp
= env
->segs
[R_SS
].base
;
2636 new_eflags
= 0; /* avoid warning */
2637 #ifdef TARGET_X86_64
2643 POPQ(sp
, new_eflags
);
2649 POPL(ssp
, sp
, sp_mask
, new_eip
);
2650 POPL(ssp
, sp
, sp_mask
, new_cs
);
2653 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2654 if (new_eflags
& VM_MASK
)
2655 goto return_to_vm86
;
2659 POPW(ssp
, sp
, sp_mask
, new_eip
);
2660 POPW(ssp
, sp
, sp_mask
, new_cs
);
2662 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2665 if (loglevel
& CPU_LOG_PCALL
) {
2666 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2667 new_cs
, new_eip
, shift
, addend
);
2668 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2671 if ((new_cs
& 0xfffc) == 0)
2672 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2673 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2674 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2675 if (!(e2
& DESC_S_MASK
) ||
2676 !(e2
& DESC_CS_MASK
))
2677 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2678 cpl
= env
->hflags
& HF_CPL_MASK
;
2681 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2682 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2683 if (e2
& DESC_C_MASK
) {
2685 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2688 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2690 if (!(e2
& DESC_P_MASK
))
2691 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2694 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2695 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2696 /* return to same privilege level */
2697 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2698 get_seg_base(e1
, e2
),
2699 get_seg_limit(e1
, e2
),
2702 /* return to different privilege level */
2703 #ifdef TARGET_X86_64
2712 POPL(ssp
, sp
, sp_mask
, new_esp
);
2713 POPL(ssp
, sp
, sp_mask
, new_ss
);
2717 POPW(ssp
, sp
, sp_mask
, new_esp
);
2718 POPW(ssp
, sp
, sp_mask
, new_ss
);
2721 if (loglevel
& CPU_LOG_PCALL
) {
2722 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2726 if ((new_ss
& 0xfffc) == 0) {
2727 #ifdef TARGET_X86_64
2728 /* NULL ss is allowed in long mode if cpl != 3*/
2729 /* XXX: test CS64 ? */
2730 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2731 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2733 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2734 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2735 DESC_W_MASK
| DESC_A_MASK
);
2736 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2740 raise_exception_err(EXCP0D_GPF
, 0);
2743 if ((new_ss
& 3) != rpl
)
2744 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2745 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2746 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2747 if (!(ss_e2
& DESC_S_MASK
) ||
2748 (ss_e2
& DESC_CS_MASK
) ||
2749 !(ss_e2
& DESC_W_MASK
))
2750 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2751 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2753 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2754 if (!(ss_e2
& DESC_P_MASK
))
2755 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2756 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2757 get_seg_base(ss_e1
, ss_e2
),
2758 get_seg_limit(ss_e1
, ss_e2
),
2762 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2763 get_seg_base(e1
, e2
),
2764 get_seg_limit(e1
, e2
),
2766 cpu_x86_set_cpl(env
, rpl
);
2768 #ifdef TARGET_X86_64
2769 if (env
->hflags
& HF_CS64_MASK
)
2773 sp_mask
= get_sp_mask(ss_e2
);
2775 /* validate data segments */
2776 validate_seg(R_ES
, rpl
);
2777 validate_seg(R_DS
, rpl
);
2778 validate_seg(R_FS
, rpl
);
2779 validate_seg(R_GS
, rpl
);
2783 SET_ESP(sp
, sp_mask
);
2786 /* NOTE: 'cpl' is the _old_ CPL */
2787 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2789 eflags_mask
|= IOPL_MASK
;
2790 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2792 eflags_mask
|= IF_MASK
;
2794 eflags_mask
&= 0xffff;
2795 load_eflags(new_eflags
, eflags_mask
);
2800 POPL(ssp
, sp
, sp_mask
, new_esp
);
2801 POPL(ssp
, sp
, sp_mask
, new_ss
);
2802 POPL(ssp
, sp
, sp_mask
, new_es
);
2803 POPL(ssp
, sp
, sp_mask
, new_ds
);
2804 POPL(ssp
, sp
, sp_mask
, new_fs
);
2805 POPL(ssp
, sp
, sp_mask
, new_gs
);
2807 /* modify processor state */
2808 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2809 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2810 load_seg_vm(R_CS
, new_cs
& 0xffff);
2811 cpu_x86_set_cpl(env
, 3);
2812 load_seg_vm(R_SS
, new_ss
& 0xffff);
2813 load_seg_vm(R_ES
, new_es
& 0xffff);
2814 load_seg_vm(R_DS
, new_ds
& 0xffff);
2815 load_seg_vm(R_FS
, new_fs
& 0xffff);
2816 load_seg_vm(R_GS
, new_gs
& 0xffff);
2818 env
->eip
= new_eip
& 0xffff;
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
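
/* For illustration only: a hypothetical snippet (not part of the emulator)
   showing how the two helpers above derive all four flat selectors from
   the single SYSENTER_CS MSR value. */
#if 0
static void sysenter_selectors_example(uint16_t msr_cs)
{
    uint16_t sysenter_cs = msr_cs & 0xfffc;               /* ring 0 CS */
    uint16_t sysenter_ss = (msr_cs + 8) & 0xfffc;         /* ring 0 SS */
    uint16_t sysexit_cs  = ((msr_cs + 16) & 0xfffc) | 3;  /* ring 3 CS */
    uint16_t sysexit_ss  = ((msr_cs + 24) & 0xfffc) | 3;  /* ring 3 SS */
    (void)sysenter_cs; (void)sysenter_ss;
    (void)sysexit_cs; (void)sysexit_ss;
}
#endif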
2917 #if defined(CONFIG_USER_ONLY)
2918 target_ulong
helper_read_crN(int reg
)
2923 void helper_write_crN(int reg
, target_ulong t0
)
2927 target_ulong
helper_read_crN(int reg
)
2931 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2937 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2938 val
= cpu_get_apic_tpr(env
);
2947 void helper_write_crN(int reg
, target_ulong t0
)
2949 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2952 cpu_x86_update_cr0(env
, t0
);
2955 cpu_x86_update_cr3(env
, t0
);
2958 cpu_x86_update_cr4(env
, t0
);
2961 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2962 cpu_set_apic_tpr(env
, t0
);
2964 env
->v_tpr
= t0
& 0x0f;
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
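
/* NOTE: helper_rdtsc() refuses the instruction outside ring 0 when CR4.TSD
   is set, and returns the 64-bit counter split across EDX:EAX, e.g.
   val = 0x0000000123456789 gives EAX = 0x23456789 and EDX = 0x00000001. */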
3024 #if defined(CONFIG_USER_ONLY)
3025 void helper_wrmsr(void)
3029 void helper_rdmsr(void)
3033 void helper_wrmsr(void)
3037 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3039 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3041 switch((uint32_t)ECX
) {
3042 case MSR_IA32_SYSENTER_CS
:
3043 env
->sysenter_cs
= val
& 0xffff;
3045 case MSR_IA32_SYSENTER_ESP
:
3046 env
->sysenter_esp
= val
;
3048 case MSR_IA32_SYSENTER_EIP
:
3049 env
->sysenter_eip
= val
;
3051 case MSR_IA32_APICBASE
:
3052 cpu_set_apic_base(env
, val
);
3056 uint64_t update_mask
;
3058 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3059 update_mask
|= MSR_EFER_SCE
;
3060 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3061 update_mask
|= MSR_EFER_LME
;
3062 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3063 update_mask
|= MSR_EFER_FFXSR
;
3064 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3065 update_mask
|= MSR_EFER_NXE
;
3066 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3067 update_mask
|= MSR_EFER_SVME
;
3068 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3069 (val
& update_mask
));
3078 case MSR_VM_HSAVE_PA
:
3079 env
->vm_hsave
= val
;
3081 #ifdef TARGET_X86_64
3092 env
->segs
[R_FS
].base
= val
;
3095 env
->segs
[R_GS
].base
= val
;
3097 case MSR_KERNELGSBASE
:
3098 env
->kernelgsbase
= val
;
3102 /* XXX: exception ? */
3107 void helper_rdmsr(void)
3111 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3113 switch((uint32_t)ECX
) {
3114 case MSR_IA32_SYSENTER_CS
:
3115 val
= env
->sysenter_cs
;
3117 case MSR_IA32_SYSENTER_ESP
:
3118 val
= env
->sysenter_esp
;
3120 case MSR_IA32_SYSENTER_EIP
:
3121 val
= env
->sysenter_eip
;
3123 case MSR_IA32_APICBASE
:
3124 val
= cpu_get_apic_base(env
);
3135 case MSR_VM_HSAVE_PA
:
3136 val
= env
->vm_hsave
;
3138 #ifdef TARGET_X86_64
3149 val
= env
->segs
[R_FS
].base
;
3152 val
= env
->segs
[R_GS
].base
;
3154 case MSR_KERNELGSBASE
:
3155 val
= env
->kernelgsbase
;
3159 case MSR_QPI_COMMBASE
:
3160 if (env
->kqemu_enabled
) {
3161 val
= kqemu_comm_base
;
3168 /* XXX: exception ? */
3172 EAX
= (uint32_t)(val
);
3173 EDX
= (uint32_t)(val
>> 32);
3177 target_ulong
helper_lsl(target_ulong selector1
)
3180 uint32_t e1
, e2
, eflags
, selector
;
3181 int rpl
, dpl
, cpl
, type
;
3183 selector
= selector1
& 0xffff;
3184 eflags
= cc_table
[CC_OP
].compute_all();
3185 if (load_segment(&e1
, &e2
, selector
) != 0)
3188 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3189 cpl
= env
->hflags
& HF_CPL_MASK
;
3190 if (e2
& DESC_S_MASK
) {
3191 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3194 if (dpl
< cpl
|| dpl
< rpl
)
3198 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3209 if (dpl
< cpl
|| dpl
< rpl
) {
3211 CC_SRC
= eflags
& ~CC_Z
;
3215 limit
= get_seg_limit(e1
, e2
);
3216 CC_SRC
= eflags
| CC_Z
;
3220 target_ulong
helper_lar(target_ulong selector1
)
3222 uint32_t e1
, e2
, eflags
, selector
;
3223 int rpl
, dpl
, cpl
, type
;
3225 selector
= selector1
& 0xffff;
3226 eflags
= cc_table
[CC_OP
].compute_all();
3227 if ((selector
& 0xfffc) == 0)
3229 if (load_segment(&e1
, &e2
, selector
) != 0)
3232 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3233 cpl
= env
->hflags
& HF_CPL_MASK
;
3234 if (e2
& DESC_S_MASK
) {
3235 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3238 if (dpl
< cpl
|| dpl
< rpl
)
3242 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3256 if (dpl
< cpl
|| dpl
< rpl
) {
3258 CC_SRC
= eflags
& ~CC_Z
;
3262 CC_SRC
= eflags
| CC_Z
;
3263 return e2
& 0x00f0ff00;
3266 void helper_verr(target_ulong selector1
)
3268 uint32_t e1
, e2
, eflags
, selector
;
3271 selector
= selector1
& 0xffff;
3272 eflags
= cc_table
[CC_OP
].compute_all();
3273 if ((selector
& 0xfffc) == 0)
3275 if (load_segment(&e1
, &e2
, selector
) != 0)
3277 if (!(e2
& DESC_S_MASK
))
3280 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3281 cpl
= env
->hflags
& HF_CPL_MASK
;
3282 if (e2
& DESC_CS_MASK
) {
3283 if (!(e2
& DESC_R_MASK
))
3285 if (!(e2
& DESC_C_MASK
)) {
3286 if (dpl
< cpl
|| dpl
< rpl
)
3290 if (dpl
< cpl
|| dpl
< rpl
) {
3292 CC_SRC
= eflags
& ~CC_Z
;
3296 CC_SRC
= eflags
| CC_Z
;
3299 void helper_verw(target_ulong selector1
)
3301 uint32_t e1
, e2
, eflags
, selector
;
3304 selector
= selector1
& 0xffff;
3305 eflags
= cc_table
[CC_OP
].compute_all();
3306 if ((selector
& 0xfffc) == 0)
3308 if (load_segment(&e1
, &e2
, selector
) != 0)
3310 if (!(e2
& DESC_S_MASK
))
3313 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3314 cpl
= env
->hflags
& HF_CPL_MASK
;
3315 if (e2
& DESC_CS_MASK
) {
3318 if (dpl
< cpl
|| dpl
< rpl
)
3320 if (!(e2
& DESC_W_MASK
)) {
3322 CC_SRC
= eflags
& ~CC_Z
;
3326 CC_SRC
= eflags
| CC_Z
;
/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
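
/* NOTE: the FPUC_EM bits of the control word are the exception mask bits,
   so "~env->fpuc & FPUC_EM" selects the unmasked exceptions: the summary
   and busy bits (FPUS_SE | FPUS_B) are only raised when a pending exception
   is unmasked, and fpu_raise_exception() then delivers it either as #MF
   (CR0.NE set) or through the external FERR path. */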
3357 void helper_flds_FT0(uint32_t val
)
3364 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3367 void helper_fldl_FT0(uint64_t val
)
3374 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3377 void helper_fildl_FT0(int32_t val
)
3379 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3382 void helper_flds_ST0(uint32_t val
)
3389 new_fpstt
= (env
->fpstt
- 1) & 7;
3391 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3392 env
->fpstt
= new_fpstt
;
3393 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3396 void helper_fldl_ST0(uint64_t val
)
3403 new_fpstt
= (env
->fpstt
- 1) & 7;
3405 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3406 env
->fpstt
= new_fpstt
;
3407 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3410 void helper_fildl_ST0(int32_t val
)
3413 new_fpstt
= (env
->fpstt
- 1) & 7;
3414 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3415 env
->fpstt
= new_fpstt
;
3416 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3419 void helper_fildll_ST0(int64_t val
)
3422 new_fpstt
= (env
->fpstt
- 1) & 7;
3423 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3424 env
->fpstt
= new_fpstt
;
3425 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
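
/* NOTE: the register stack is modelled with fpstt as the top-of-stack index
   and fptags[] as per-register empty flags (1 = empty, 0 = valid), so each
   load helper above decrements fpstt modulo 8 and clears the tag of the
   newly written register. */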
3428 uint32_t helper_fsts_ST0(void)
3434 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3438 uint64_t helper_fstl_ST0(void)
3444 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3448 int32_t helper_fist_ST0(void)
3451 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3452 if (val
!= (int16_t)val
)
3457 int32_t helper_fistl_ST0(void)
3460 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3464 int64_t helper_fistll_ST0(void)
3467 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3471 int32_t helper_fistt_ST0(void)
3474 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3475 if (val
!= (int16_t)val
)
3480 int32_t helper_fisttl_ST0(void)
3483 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3487 int64_t helper_fisttll_ST0(void)
3490 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3494 void helper_fldt_ST0(target_ulong ptr
)
3497 new_fpstt
= (env
->fpstt
- 1) & 7;
3498 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3499 env
->fpstt
= new_fpstt
;
3500 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3503 void helper_fstt_ST0(target_ulong ptr
)
3505 helper_fstt(ST0
, ptr
);
void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}
3565 /* FPU operations */
3567 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
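
/* NOTE: floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or
   2 (unordered); indexing fcom_ccval with ret + 1 maps these onto the x87
   condition codes: less -> C0 (0x0100), equal -> C3 (0x4000), greater ->
   none, unordered -> C3|C2|C0 (0x4500). */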
3569 void helper_fcom_ST0_FT0(void)
3573 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3574 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3578 void helper_fucom_ST0_FT0(void)
3582 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3583 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3587 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3589 void helper_fcomi_ST0_FT0(void)
3594 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3595 eflags
= cc_table
[CC_OP
].compute_all();
3596 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3601 void helper_fucomi_ST0_FT0(void)
3606 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3607 eflags
= cc_table
[CC_OP
].compute_all();
3608 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
3742 static void update_fp_status(void)
3746 /* set rounding mode */
3747 switch(env
->fpuc
& RC_MASK
) {
3750 rnd_type
= float_round_nearest_even
;
3753 rnd_type
= float_round_down
;
3756 rnd_type
= float_round_up
;
3759 rnd_type
= float_round_to_zero
;
3762 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3764 switch((env
->fpuc
>> 8) & 3) {
3776 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3780 void helper_fldcw(uint32_t val
)
3786 void helper_fclex(void)
3788 env
->fpus
&= 0x7f00;
3791 void helper_fwait(void)
3793 if (env
->fpus
& FPUS_SE
)
3794 fpu_raise_exception();
3798 void helper_fninit(void)
3815 void helper_fbld_ST0(target_ulong ptr
)
3823 for(i
= 8; i
>= 0; i
--) {
3825 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3828 if (ldub(ptr
+ 9) & 0x80)
3834 void helper_fbst_ST0(target_ulong ptr
)
3837 target_ulong mem_ref
, mem_end
;
3840 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3842 mem_end
= mem_ref
+ 9;
3849 while (mem_ref
< mem_end
) {
3854 v
= ((v
/ 10) << 4) | (v
% 10);
3857 while (mem_ref
< mem_end
) {
3862 void helper_f2xm1(void)
3864 ST0
= pow(2.0,ST0
) - 1.0;
3867 void helper_fyl2x(void)
3869 CPU86_LDouble fptemp
;
3873 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3877 env
->fpus
&= (~0x4700);
3882 void helper_fptan(void)
3884 CPU86_LDouble fptemp
;
3887 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3893 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3894 /* the above code is for |arg| < 2**52 only */
3898 void helper_fpatan(void)
3900 CPU86_LDouble fptemp
, fpsrcop
;
3904 ST1
= atan2(fpsrcop
,fptemp
);
3908 void helper_fxtract(void)
3910 CPU86_LDoubleU temp
;
3911 unsigned int expdif
;
3914 expdif
= EXPD(temp
) - EXPBIAS
;
3915 /*DP exponent bias*/
3922 void helper_fprem1(void)
3924 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3925 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3927 signed long long int q
;
3929 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3930 ST0
= 0.0 / 0.0; /* NaN */
3931 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3937 fpsrcop1
.d
= fpsrcop
;
3939 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3942 /* optimisation? taken from the AMD docs */
3943 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3944 /* ST0 is unchanged */
3949 dblq
= fpsrcop
/ fptemp
;
3950 /* round dblq towards nearest integer */
3952 ST0
= fpsrcop
- fptemp
* dblq
;
3954 /* convert dblq to q by truncating towards zero */
3956 q
= (signed long long int)(-dblq
);
3958 q
= (signed long long int)dblq
;
3960 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3961 /* (C0,C3,C1) <-- (q2,q1,q0) */
3962 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3963 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3964 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3966 env
->fpus
|= 0x400; /* C2 <-- 1 */
3967 fptemp
= pow(2.0, expdif
- 50);
3968 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3969 /* fpsrcop = integer obtained by chopping */
3970 fpsrcop
= (fpsrcop
< 0.0) ?
3971 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3972 ST0
-= (ST1
* fpsrcop
* fptemp
);
3976 void helper_fprem(void)
3978 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3979 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3981 signed long long int q
;
3983 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3984 ST0
= 0.0 / 0.0; /* NaN */
3985 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3989 fpsrcop
= (CPU86_LDouble
)ST0
;
3990 fptemp
= (CPU86_LDouble
)ST1
;
3991 fpsrcop1
.d
= fpsrcop
;
3993 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3996 /* optimisation? taken from the AMD docs */
3997 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3998 /* ST0 is unchanged */
4002 if ( expdif
< 53 ) {
4003 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4004 /* round dblq towards zero */
4005 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4006 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4008 /* convert dblq to q by truncating towards zero */
4010 q
= (signed long long int)(-dblq
);
4012 q
= (signed long long int)dblq
;
4014 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4015 /* (C0,C3,C1) <-- (q2,q1,q0) */
4016 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4017 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4018 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4020 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4021 env
->fpus
|= 0x400; /* C2 <-- 1 */
4022 fptemp
= pow(2.0, (double)(expdif
- N
));
4023 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4024 /* fpsrcop = integer obtained by chopping */
4025 fpsrcop
= (fpsrcop
< 0.0) ?
4026 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4027 ST0
-= (ST1
* fpsrcop
* fptemp
);
4031 void helper_fyl2xp1(void)
4033 CPU86_LDouble fptemp
;
4036 if ((fptemp
+1.0)>0.0) {
4037 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4041 env
->fpus
&= (~0x4700);
4046 void helper_fsqrt(void)
4048 CPU86_LDouble fptemp
;
4052 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4058 void helper_fsincos(void)
4060 CPU86_LDouble fptemp
;
4063 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4069 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4070 /* the above code is for |arg| < 2**63 only */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}
4084 void helper_fsin(void)
4086 CPU86_LDouble fptemp
;
4089 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4093 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4094 /* the above code is for |arg| < 2**53 only */
4098 void helper_fcos(void)
4100 CPU86_LDouble fptemp
;
4103 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4107 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4108 /* the above code is for |arg5 < 2**63 only */
4112 void helper_fxam_ST0(void)
4114 CPU86_LDoubleU temp
;
4119 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4121 env
->fpus
|= 0x200; /* C1 <-- 1 */
4123 /* XXX: test fptags too */
4124 expdif
= EXPD(temp
);
4125 if (expdif
== MAXEXPD
) {
4126 #ifdef USE_X86LDOUBLE
4127 if (MANTD(temp
) == 0x8000000000000000ULL
)
4129 if (MANTD(temp
) == 0)
4131 env
->fpus
|= 0x500 /*Infinity*/;
4133 env
->fpus
|= 0x100 /*NaN*/;
4134 } else if (expdif
== 0) {
4135 if (MANTD(temp
) == 0)
4136 env
->fpus
|= 0x4000 /*Zero*/;
4138 env
->fpus
|= 0x4400 /*Denormal*/;
4144 void helper_fstenv(target_ulong ptr
, int data32
)
4146 int fpus
, fptag
, exp
, i
;
4150 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4152 for (i
=7; i
>=0; i
--) {
4154 if (env
->fptags
[i
]) {
4157 tmp
.d
= env
->fpregs
[i
].d
;
4160 if (exp
== 0 && mant
== 0) {
4163 } else if (exp
== 0 || exp
== MAXEXPD
4164 #ifdef USE_X86LDOUBLE
4165 || (mant
& (1LL << 63)) == 0
4168 /* NaNs, infinity, denormal */
4175 stl(ptr
, env
->fpuc
);
4177 stl(ptr
+ 8, fptag
);
4178 stl(ptr
+ 12, 0); /* fpip */
4179 stl(ptr
+ 16, 0); /* fpcs */
4180 stl(ptr
+ 20, 0); /* fpoo */
4181 stl(ptr
+ 24, 0); /* fpos */
4184 stw(ptr
, env
->fpuc
);
4186 stw(ptr
+ 4, fptag
);
4194 void helper_fldenv(target_ulong ptr
, int data32
)
4199 env
->fpuc
= lduw(ptr
);
4200 fpus
= lduw(ptr
+ 4);
4201 fptag
= lduw(ptr
+ 8);
4204 env
->fpuc
= lduw(ptr
);
4205 fpus
= lduw(ptr
+ 2);
4206 fptag
= lduw(ptr
+ 4);
4208 env
->fpstt
= (fpus
>> 11) & 7;
4209 env
->fpus
= fpus
& ~0x3800;
4210 for(i
= 0;i
< 8; i
++) {
4211 env
->fptags
[i
] = ((fptag
& 3) == 3);
4216 void helper_fsave(target_ulong ptr
, int data32
)
4221 helper_fstenv(ptr
, data32
);
4223 ptr
+= (14 << data32
);
4224 for(i
= 0;i
< 8; i
++) {
4226 helper_fstt(tmp
, ptr
);
4244 void helper_frstor(target_ulong ptr
, int data32
)
4249 helper_fldenv(ptr
, data32
);
4250 ptr
+= (14 << data32
);
4252 for(i
= 0;i
< 8; i
++) {
4253 tmp
= helper_fldt(ptr
);
4259 void helper_fxsave(target_ulong ptr
, int data64
)
4261 int fpus
, fptag
, i
, nb_xmm_regs
;
4265 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4267 for(i
= 0; i
< 8; i
++) {
4268 fptag
|= (env
->fptags
[i
] << i
);
4270 stw(ptr
, env
->fpuc
);
4272 stw(ptr
+ 4, fptag
^ 0xff);
4273 #ifdef TARGET_X86_64
4275 stq(ptr
+ 0x08, 0); /* rip */
4276 stq(ptr
+ 0x10, 0); /* rdp */
4280 stl(ptr
+ 0x08, 0); /* eip */
4281 stl(ptr
+ 0x0c, 0); /* sel */
4282 stl(ptr
+ 0x10, 0); /* dp */
4283 stl(ptr
+ 0x14, 0); /* sel */
4287 for(i
= 0;i
< 8; i
++) {
4289 helper_fstt(tmp
, addr
);
4293 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4294 /* XXX: finish it */
4295 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4296 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4297 if (env
->hflags
& HF_CS64_MASK
)
4302 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4303 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4304 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4310 void helper_fxrstor(target_ulong ptr
, int data64
)
4312 int i
, fpus
, fptag
, nb_xmm_regs
;
4316 env
->fpuc
= lduw(ptr
);
4317 fpus
= lduw(ptr
+ 2);
4318 fptag
= lduw(ptr
+ 4);
4319 env
->fpstt
= (fpus
>> 11) & 7;
4320 env
->fpus
= fpus
& ~0x3800;
4322 for(i
= 0;i
< 8; i
++) {
4323 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4327 for(i
= 0;i
< 8; i
++) {
4328 tmp
= helper_fldt(addr
);
4333 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4334 /* XXX: finish it */
4335 env
->mxcsr
= ldl(ptr
+ 0x18);
4337 if (env
->hflags
& HF_CS64_MASK
)
4342 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4343 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4344 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
4406 #ifdef TARGET_X86_64
4408 //#define DEBUG_MULDIV
4410 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4419 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4423 add128(plow
, phigh
, 1, 0);
4426 /* return TRUE if overflow */
4427 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4429 uint64_t q
, r
, a1
, a0
;
4442 /* XXX: use a better algorithm */
4443 for(i
= 0; i
< 64; i
++) {
4445 a1
= (a1
<< 1) | (a0
>> 63);
4446 if (ab
|| a1
>= b
) {
4452 a0
= (a0
<< 1) | qb
;
4454 #if defined(DEBUG_MULDIV)
4455 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4456 *phigh
, *plow
, b
, a0
, a1
);
4464 /* return TRUE if overflow */
4465 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4468 sa
= ((int64_t)*phigh
< 0);
4470 neg128(plow
, phigh
);
4474 if (div64(plow
, phigh
, b
) != 0)
4477 if (*plow
> (1ULL << 63))
4481 if (*plow
>= (1ULL << 63))
4489 void helper_mulq_EAX_T0(target_ulong t0
)
4493 mulu64(&r0
, &r1
, EAX
, t0
);
4500 void helper_imulq_EAX_T0(target_ulong t0
)
4504 muls64(&r0
, &r1
, EAX
, t0
);
4508 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4511 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4515 muls64(&r0
, &r1
, t0
, t1
);
4517 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4521 void helper_divq_EAX(target_ulong t0
)
4525 raise_exception(EXCP00_DIVZ
);
4529 if (div64(&r0
, &r1
, t0
))
4530 raise_exception(EXCP00_DIVZ
);
4535 void helper_idivq_EAX(target_ulong t0
)
4539 raise_exception(EXCP00_DIVZ
);
4543 if (idiv64(&r0
, &r1
, t0
))
4544 raise_exception(EXCP00_DIVZ
);
void helper_hlt(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
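
/* NOTE: HF_INHIBIT_IRQ_MASK models the one-instruction interrupt shadow
   that follows STI and moves to SS: the translated code sets it via
   helper_set_inhibit_irq() and interrupts are held off until it is cleared
   before the next instruction. */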
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}
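
/* NOTE: approx_rsqrt() and approx_rcp() back the SSE reciprocal
   (square-root) estimate instructions with full-precision host arithmetic,
   so the emulated results are at least as accurate as the hardware's
   approximations. */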
4667 #if !defined(CONFIG_USER_ONLY)
4669 #define MMUSUFFIX _mmu
4672 #include "softmmu_template.h"
4675 #include "softmmu_template.h"
4678 #include "softmmu_template.h"
4681 #include "softmmu_template.h"
4685 /* try to fill the TLB and return an exception if error. If retaddr is
4686 NULL, it means that the function was called in C code (i.e. not
4687 from generated code or from helper.c) */
4688 /* XXX: fix it to restore all registers */
4689 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4691 TranslationBlock
*tb
;
4694 CPUX86State
*saved_env
;
4696 /* XXX: hack to restore env in all cases, even if not called from
4699 env
= cpu_single_env
;
4701 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4704 /* now we have a real cpu fault */
4705 pc
= (unsigned long)retaddr
;
4706 tb
= tb_find_pc(pc
);
4708 /* the PC is inside the translated code. It means that we have
4709 a virtual CPU fault */
4710 cpu_restore_state(tb
, env
, pc
, NULL
);
4713 raise_exception_err(env
->exception_index
, env
->error_code
);
4719 /* Secure Virtual Machine helpers */
4721 #if defined(CONFIG_USER_ONLY)
4723 void helper_vmrun(int aflag
, int next_eip_addend
)
4726 void helper_vmmcall(void)
4729 void helper_vmload(int aflag
)
4732 void helper_vmsave(int aflag
)
4735 void helper_stgi(void)
4738 void helper_clgi(void)
4741 void helper_skinit(void)
4744 void helper_invlpga(int aflag
)
4747 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4750 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4754 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4755 uint32_t next_eip_addend
)
4760 static inline void svm_save_seg(target_phys_addr_t addr
,
4761 const SegmentCache
*sc
)
4763 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4765 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4767 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4769 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4770 (sc
->flags
>> 8) | ((sc
->flags
>> 12) & 0x0f00));
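
/* NOTE: the VMCB keeps segment attributes in a packed 12-bit form, so
   svm_save_seg() folds flag bits 8..15 (type, S, DPL, P) into attrib bits
   0..7 and flag bits 20..23 (AVL, L, D/B, G) into attrib bits 8..11;
   svm_load_seg() below performs the inverse expansion. */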
4773 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4777 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4778 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4779 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4780 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4781 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4784 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4785 CPUState
*env
, int seg_reg
)
4787 SegmentCache sc1
, *sc
= &sc1
;
4788 svm_load_seg(addr
, sc
);
4789 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4790 sc
->base
, sc
->limit
, sc
->flags
);
4793 void helper_vmrun(int aflag
, int next_eip_addend
)
4799 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4804 addr
= (uint32_t)EAX
;
4806 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4807 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
4809 env
->vm_vmcb
= addr
;
4811 /* save the current CPU state in the hsave page */
4812 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4813 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4815 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4816 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4818 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4819 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4820 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4821 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4822 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4823 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4825 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4826 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4828 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4830 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4832 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4834 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4837 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4838 EIP
+ next_eip_addend
);
4839 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4840 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4842 /* load the interception bitmaps so we do not need to access the
4844 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4845 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4846 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4847 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4848 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4849 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4851 /* enable intercepts */
4852 env
->hflags
|= HF_SVMI_MASK
;
4854 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4856 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4857 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4859 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4860 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4862 /* clear exit_info_2 so we behave like the real hardware */
4863 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4865 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4866 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4867 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4868 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4869 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4870 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4871 if (int_ctl
& V_INTR_MASKING_MASK
) {
4872 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4873 env
->hflags2
|= HF2_VINTR_MASK
;
4874 if (env
->eflags
& IF_MASK
)
4875 env
->hflags2
|= HF2_HIF_MASK
;
4879 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4881 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4882 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4883 CC_OP
= CC_OP_EFLAGS
;
4885 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4887 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4889 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4891 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4894 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4896 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4897 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4898 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4899 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4900 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4902 /* FIXME: guest state consistency checks */
4904 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4905 case TLB_CONTROL_DO_NOTHING
:
4907 case TLB_CONTROL_FLUSH_ALL_ASID
:
4908 /* FIXME: this is not 100% correct but should work for now */
4913 env
->hflags2
|= HF2_GIF_MASK
;
4915 if (int_ctl
& V_IRQ_MASK
) {
4916 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4919 /* maybe we need to inject an event */
4920 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4921 if (event_inj
& SVM_EVTINJ_VALID
) {
4922 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4923 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4924 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4925 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4927 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4928 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4929 /* FIXME: need to implement valid_err */
4930 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4931 case SVM_EVTINJ_TYPE_INTR
:
4932 env
->exception_index
= vector
;
4933 env
->error_code
= event_inj_err
;
4934 env
->exception_is_int
= 0;
4935 env
->exception_next_eip
= -1;
4936 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4937 fprintf(logfile
, "INTR");
4938 /* XXX: is it always correct ? */
4939 do_interrupt(vector
, 0, 0, 0, 1);
4941 case SVM_EVTINJ_TYPE_NMI
:
4942 env
->exception_index
= EXCP02_NMI
;
4943 env
->error_code
= event_inj_err
;
4944 env
->exception_is_int
= 0;
4945 env
->exception_next_eip
= EIP
;
4946 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4947 fprintf(logfile
, "NMI");
4950 case SVM_EVTINJ_TYPE_EXEPT
:
4951 env
->exception_index
= vector
;
4952 env
->error_code
= event_inj_err
;
4953 env
->exception_is_int
= 0;
4954 env
->exception_next_eip
= -1;
4955 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4956 fprintf(logfile
, "EXEPT");
4959 case SVM_EVTINJ_TYPE_SOFT
:
4960 env
->exception_index
= vector
;
4961 env
->error_code
= event_inj_err
;
4962 env
->exception_is_int
= 1;
4963 env
->exception_next_eip
= EIP
;
4964 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4965 fprintf(logfile
, "SOFT");
4969 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4970 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4974 void helper_vmmcall(void)
4976 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
4977 raise_exception(EXCP06_ILLOP
);
4980 void helper_vmload(int aflag
)
4983 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
4988 addr
= (uint32_t)EAX
;
4990 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4991 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4992 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4993 env
->segs
[R_FS
].base
);
4995 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
4997 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
4999 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5001 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5004 #ifdef TARGET_X86_64
5005 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5006 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5007 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5008 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5010 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5011 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5012 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5013 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5016 void helper_vmsave(int aflag
)
5019 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5024 addr
= (uint32_t)EAX
;
5026 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5027 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5028 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5029 env
->segs
[R_FS
].base
);
5031 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5033 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5035 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5037 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5040 #ifdef TARGET_X86_64
5041 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5042 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5043 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5044 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5046 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5047 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5048 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5049 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5052 void helper_stgi(void)
5054 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5055 env
->hflags2
|= HF2_GIF_MASK
;
5058 void helper_clgi(void)
5060 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5061 env
->hflags2
&= ~HF2_GIF_MASK
;
5064 void helper_skinit(void)
5066 helper_svm_check_intercept_param(SVM_EXIT_SKINIT
, 0);
5067 /* XXX: not implemented */
5068 raise_exception(EXCP06_ILLOP
);
5071 void helper_invlpga(int aflag
)
5074 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA
, 0);
5079 addr
= (uint32_t)EAX
;
5081 /* XXX: could use the ASID to see if it is needed to do the
5083 tlb_flush_page(env
, addr
);
5086 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5088 if (likely(!(env
->hflags
& HF_SVMI_MASK
)))
5091 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5092 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
5093 helper_vmexit(type
, param
);
5096 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5097 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
5098 helper_vmexit(type
, param
);
5101 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
5102 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
5103 helper_vmexit(type
, param
);
5106 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
5107 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
5108 helper_vmexit(type
, param
);
5111 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
5112 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
5113 helper_vmexit(type
, param
);
5117 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
5118 /* FIXME: this should be read in at vmrun (faster this way?) */
5119 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5121 switch((uint32_t)ECX
) {
5126 case 0xc0000000 ... 0xc0001fff:
5127 t0
= (8192 + ECX
- 0xc0000000) * 2;
5131 case 0xc0010000 ... 0xc0011fff:
5132 t0
= (16384 + ECX
- 0xc0010000) * 2;
5137 helper_vmexit(type
, param
);
5142 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5143 helper_vmexit(type
, param
);
5147 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
5148 helper_vmexit(type
, param
);
5154 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5155 uint32_t next_eip_addend
)
5157 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
5158 /* FIXME: this should be read in at vmrun (faster this way?) */
5159 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5160 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5161 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5163 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5164 env
->eip
+ next_eip_addend
);
5165 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
5170 /* Note: currently only 32 bits of exit_code are used */
5171 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5175 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5176 fprintf(logfile
,"vmexit(%08x, %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
5177 exit_code
, exit_info_1
,
5178 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
5181 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
5182 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
5183 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
5185 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
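
/* MMX state helpers: entering MMX mode marks every FPU tag as valid (0) so
   the MMX registers alias the FP stack, while EMMS sets every tag back to
   empty (0x01 per register). */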
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
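/* Note: t0 is expected to be non-zero; the translated code only calls these
   helpers after checking for a zero source, so the scan loops terminate. */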
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
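
/* Lazy condition-code evaluation: for each CC_OP value, cc_table provides a
   pair of functions that recompute either the full set of EFLAGS condition
   bits or just CF from CC_SRC/CC_DST. */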
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },