/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* Debug wrapper: log the call site line, then invoke the real function
   (parenthesized to bypass this macro).  NOTE(review): the do/while(0)
   wrapper lines were lost in extraction and restored here — verify
   against upstream. */
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
32 const uint8_t parity_table
[256] = {
33 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
34 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
35 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
36 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
37 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
38 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
39 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
40 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
41 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
42 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
43 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
44 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
45 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
46 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
47 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
48 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
51 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
52 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
58 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
59 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
60 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
63 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
64 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
/* Modulo-17 rotate-count table for 16-bit RCL/RCR: the rotation acts on
   the 16 data bits plus CF, so an x86 count (already masked to 0..31)
   reduces modulo 17. */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* Modulo-9 rotate-count table for 8-bit RCL/RCR: the rotation acts on
   the 8 data bits plus CF, so an x86 count (already masked to 0..31)
   reduces modulo 9. */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
83 const CPU86_LDouble f15rk
[7] =
85 0.00000000000000000000L,
86 1.00000000000000000000L,
87 3.14159265358979323851L, /*pi*/
88 0.30102999566398119523L, /*lg2*/
89 0.69314718055994530943L, /*ln2*/
90 1.44269504088896340739L, /*l2e*/
91 3.32192809488736234781L, /*l2t*/
96 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
100 spin_lock(&global_cpu_lock
);
103 void cpu_unlock(void)
105 spin_unlock(&global_cpu_lock
);
108 /* return non zero if error */
109 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
120 index
= selector
& ~7;
121 if ((index
+ 7) > dt
->limit
)
123 ptr
= dt
->base
+ index
;
124 *e1_ptr
= ldl_kernel(ptr
);
125 *e2_ptr
= ldl_kernel(ptr
+ 4);
129 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
132 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
133 if (e2
& DESC_G_MASK
)
134 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor words:
   base[15:0] in e1[31:16], base[23:16] in e2[7:0], base[31:24] in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
143 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
145 sc
->base
= get_seg_base(e1
, e2
);
146 sc
->limit
= get_seg_limit(e1
, e2
);
150 /* init the segment cache in vm86 mode. */
151 static inline void load_seg_vm(int seg
, int selector
)
154 cpu_x86_load_seg_cache(env
, seg
, selector
,
155 (selector
<< 4), 0xffff, 0);
158 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
159 uint32_t *esp_ptr
, int dpl
)
161 int type
, index
, shift
;
166 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
167 for(i
=0;i
<env
->tr
.limit
;i
++) {
168 printf("%02x ", env
->tr
.base
[i
]);
169 if ((i
& 7) == 7) printf("\n");
175 if (!(env
->tr
.flags
& DESC_P_MASK
))
176 cpu_abort(env
, "invalid tss");
177 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
179 cpu_abort(env
, "invalid tss type");
181 index
= (dpl
* 4 + 2) << shift
;
182 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
183 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
185 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
186 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
188 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
189 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
193 /* XXX: merge with load_seg() */
194 static void tss_load_seg(int seg_reg
, int selector
)
199 if ((selector
& 0xfffc) != 0) {
200 if (load_segment(&e1
, &e2
, selector
) != 0)
201 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
202 if (!(e2
& DESC_S_MASK
))
203 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
205 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
206 cpl
= env
->hflags
& HF_CPL_MASK
;
207 if (seg_reg
== R_CS
) {
208 if (!(e2
& DESC_CS_MASK
))
209 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
210 /* XXX: is it correct ? */
212 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
213 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
214 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
215 } else if (seg_reg
== R_SS
) {
216 /* SS must be writable data */
217 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
218 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
219 if (dpl
!= cpl
|| dpl
!= rpl
)
220 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
222 /* not readable code */
223 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
224 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
225 /* if data or non conforming code, checks the rights */
226 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
227 if (dpl
< cpl
|| dpl
< rpl
)
228 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
231 if (!(e2
& DESC_P_MASK
))
232 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
233 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
234 get_seg_base(e1
, e2
),
235 get_seg_limit(e1
, e2
),
238 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
239 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
/* Discriminators telling switch_tss() which instruction caused the task
   switch (affects busy-bit and NT-flag handling). */
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
247 /* XXX: restore CPU state in registers (PowerPC case) */
248 static void switch_tss(int tss_selector
,
249 uint32_t e1
, uint32_t e2
, int source
,
252 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
253 target_ulong tss_base
;
254 uint32_t new_regs
[8], new_segs
[6];
255 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
256 uint32_t old_eflags
, eflags_mask
;
261 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
263 if (loglevel
& CPU_LOG_PCALL
)
264 fprintf(logfile
, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
267 /* if task gate, we read the TSS segment and we load it */
269 if (!(e2
& DESC_P_MASK
))
270 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
271 tss_selector
= e1
>> 16;
272 if (tss_selector
& 4)
273 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
274 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
275 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
276 if (e2
& DESC_S_MASK
)
277 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
278 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
280 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
283 if (!(e2
& DESC_P_MASK
))
284 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
290 tss_limit
= get_seg_limit(e1
, e2
);
291 tss_base
= get_seg_base(e1
, e2
);
292 if ((tss_selector
& 4) != 0 ||
293 tss_limit
< tss_limit_max
)
294 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
295 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
297 old_tss_limit_max
= 103;
299 old_tss_limit_max
= 43;
301 /* read all the registers from the new TSS */
304 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
305 new_eip
= ldl_kernel(tss_base
+ 0x20);
306 new_eflags
= ldl_kernel(tss_base
+ 0x24);
307 for(i
= 0; i
< 8; i
++)
308 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
309 for(i
= 0; i
< 6; i
++)
310 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
311 new_ldt
= lduw_kernel(tss_base
+ 0x60);
312 new_trap
= ldl_kernel(tss_base
+ 0x64);
316 new_eip
= lduw_kernel(tss_base
+ 0x0e);
317 new_eflags
= lduw_kernel(tss_base
+ 0x10);
318 for(i
= 0; i
< 8; i
++)
319 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
320 for(i
= 0; i
< 4; i
++)
321 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
322 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
328 /* NOTE: we must avoid memory exceptions during the task switch,
329 so we make dummy accesses before */
330 /* XXX: it can still fail in some cases, so a bigger hack is
331 necessary to valid the TLB after having done the accesses */
333 v1
= ldub_kernel(env
->tr
.base
);
334 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
335 stb_kernel(env
->tr
.base
, v1
);
336 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
338 /* clear busy bit (it is restartable) */
339 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
342 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
343 e2
= ldl_kernel(ptr
+ 4);
344 e2
&= ~DESC_TSS_BUSY_MASK
;
345 stl_kernel(ptr
+ 4, e2
);
347 old_eflags
= compute_eflags();
348 if (source
== SWITCH_TSS_IRET
)
349 old_eflags
&= ~NT_MASK
;
351 /* save the current state in the old TSS */
354 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
355 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
356 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
357 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
358 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
359 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
360 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
361 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
362 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
363 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
364 for(i
= 0; i
< 6; i
++)
365 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
368 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
369 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
370 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
371 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
372 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
373 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
374 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
375 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
376 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
377 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
378 for(i
= 0; i
< 4; i
++)
379 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
382 /* now if an exception occurs, it will occurs in the next task
385 if (source
== SWITCH_TSS_CALL
) {
386 stw_kernel(tss_base
, env
->tr
.selector
);
387 new_eflags
|= NT_MASK
;
391 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
394 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
395 e2
= ldl_kernel(ptr
+ 4);
396 e2
|= DESC_TSS_BUSY_MASK
;
397 stl_kernel(ptr
+ 4, e2
);
400 /* set the new CPU state */
401 /* from this point, any exception which occurs can give problems */
402 env
->cr
[0] |= CR0_TS_MASK
;
403 env
->hflags
|= HF_TS_MASK
;
404 env
->tr
.selector
= tss_selector
;
405 env
->tr
.base
= tss_base
;
406 env
->tr
.limit
= tss_limit
;
407 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
409 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
410 cpu_x86_update_cr3(env
, new_cr3
);
413 /* load all registers without an exception, then reload them with
414 possible exception */
416 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
417 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
419 eflags_mask
&= 0xffff;
420 load_eflags(new_eflags
, eflags_mask
);
421 /* XXX: what to do in 16 bit case ? */
430 if (new_eflags
& VM_MASK
) {
431 for(i
= 0; i
< 6; i
++)
432 load_seg_vm(i
, new_segs
[i
]);
433 /* in vm86, CPL is always 3 */
434 cpu_x86_set_cpl(env
, 3);
436 /* CPL is set the RPL of CS */
437 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
438 /* first just selectors as the rest may trigger exceptions */
439 for(i
= 0; i
< 6; i
++)
440 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
443 env
->ldt
.selector
= new_ldt
& ~4;
450 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
452 if ((new_ldt
& 0xfffc) != 0) {
454 index
= new_ldt
& ~7;
455 if ((index
+ 7) > dt
->limit
)
456 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
457 ptr
= dt
->base
+ index
;
458 e1
= ldl_kernel(ptr
);
459 e2
= ldl_kernel(ptr
+ 4);
460 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
461 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
462 if (!(e2
& DESC_P_MASK
))
463 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
464 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
467 /* load the segments */
468 if (!(new_eflags
& VM_MASK
)) {
469 tss_load_seg(R_CS
, new_segs
[R_CS
]);
470 tss_load_seg(R_SS
, new_segs
[R_SS
]);
471 tss_load_seg(R_ES
, new_segs
[R_ES
]);
472 tss_load_seg(R_DS
, new_segs
[R_DS
]);
473 tss_load_seg(R_FS
, new_segs
[R_FS
]);
474 tss_load_seg(R_GS
, new_segs
[R_GS
]);
477 /* check that EIP is in the CS segment limits */
478 if (new_eip
> env
->segs
[R_CS
].limit
) {
479 /* XXX: different exception if CALL ? */
480 raise_exception_err(EXCP0D_GPF
, 0);
484 /* check if Port I/O is allowed in TSS */
485 static inline void check_io(int addr
, int size
)
487 int io_offset
, val
, mask
;
489 /* TSS must be a valid 32 bit one */
490 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
491 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
494 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
495 io_offset
+= (addr
>> 3);
496 /* Note: the check needs two bytes */
497 if ((io_offset
+ 1) > env
->tr
.limit
)
499 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
501 mask
= (1 << size
) - 1;
502 /* all bits must be zero to allow the I/O */
503 if ((val
& mask
) != 0) {
505 raise_exception_err(EXCP0D_GPF
, 0);
509 void check_iob_T0(void)
514 void check_iow_T0(void)
519 void check_iol_T0(void)
524 void check_iob_DX(void)
526 check_io(EDX
& 0xffff, 1);
529 void check_iow_DX(void)
531 check_io(EDX
& 0xffff, 2);
534 void check_iol_DX(void)
536 check_io(EDX
& 0xffff, 4);
539 static inline unsigned int get_sp_mask(unsigned int e2
)
541 if (e2
& DESC_B_MASK
)
/* Update ESP under the stack-size mask.  The 64-bit build special-cases
   the two common masks so a full 64-bit RSP is written when appropriate.
   NOTE(review): the #ifdef TARGET_X86_64 framing and final else branch
   were lost in extraction and restored — verify against upstream. */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
/* XXX: add a is_user flag to have proper security support */
/* Kernel-privilege stack push/pop helpers.  'sp' is an lvalue updated in
   place; accesses are masked by sp_mask to honour 16/32-bit stacks.
   NOTE(review): the sp adjustment lines were lost in extraction and
   restored from upstream QEMU — verify. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
586 /* protected mode interrupt */
587 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
588 unsigned int next_eip
, int is_hw
)
591 target_ulong ptr
, ssp
;
592 int type
, dpl
, selector
, ss_dpl
, cpl
;
593 int has_error_code
, new_stack
, shift
;
594 uint32_t e1
, e2
, offset
, ss
, esp
, ss_e1
, ss_e2
;
595 uint32_t old_eip
, sp_mask
;
596 int svm_should_check
= 1;
598 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
600 svm_should_check
= 0;
604 && (INTERCEPTEDl(_exceptions
, 1 << intno
)
606 raise_interrupt(intno
, is_int
, error_code
, 0);
609 if (!is_int
&& !is_hw
) {
628 if (intno
* 8 + 7 > dt
->limit
)
629 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
630 ptr
= dt
->base
+ intno
* 8;
631 e1
= ldl_kernel(ptr
);
632 e2
= ldl_kernel(ptr
+ 4);
633 /* check gate type */
634 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
636 case 5: /* task gate */
637 /* must do that check here to return the correct error code */
638 if (!(e2
& DESC_P_MASK
))
639 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
640 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
641 if (has_error_code
) {
644 /* push the error code */
645 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
647 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
651 esp
= (ESP
- (2 << shift
)) & mask
;
652 ssp
= env
->segs
[R_SS
].base
+ esp
;
654 stl_kernel(ssp
, error_code
);
656 stw_kernel(ssp
, error_code
);
660 case 6: /* 286 interrupt gate */
661 case 7: /* 286 trap gate */
662 case 14: /* 386 interrupt gate */
663 case 15: /* 386 trap gate */
666 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
669 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
670 cpl
= env
->hflags
& HF_CPL_MASK
;
671 /* check privledge if software int */
672 if (is_int
&& dpl
< cpl
)
673 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
674 /* check valid bit */
675 if (!(e2
& DESC_P_MASK
))
676 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
678 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
679 if ((selector
& 0xfffc) == 0)
680 raise_exception_err(EXCP0D_GPF
, 0);
682 if (load_segment(&e1
, &e2
, selector
) != 0)
683 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
684 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
685 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
686 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
688 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
689 if (!(e2
& DESC_P_MASK
))
690 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
691 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
692 /* to inner privilege */
693 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
694 if ((ss
& 0xfffc) == 0)
695 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
697 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
698 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
699 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
700 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
702 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
703 if (!(ss_e2
& DESC_S_MASK
) ||
704 (ss_e2
& DESC_CS_MASK
) ||
705 !(ss_e2
& DESC_W_MASK
))
706 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
707 if (!(ss_e2
& DESC_P_MASK
))
708 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
710 sp_mask
= get_sp_mask(ss_e2
);
711 ssp
= get_seg_base(ss_e1
, ss_e2
);
712 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
713 /* to same privilege */
714 if (env
->eflags
& VM_MASK
)
715 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
717 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
718 ssp
= env
->segs
[R_SS
].base
;
722 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
723 new_stack
= 0; /* avoid warning */
724 sp_mask
= 0; /* avoid warning */
725 ssp
= 0; /* avoid warning */
726 esp
= 0; /* avoid warning */
732 /* XXX: check that enough room is available */
733 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
734 if (env
->eflags
& VM_MASK
)
740 if (env
->eflags
& VM_MASK
) {
741 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
742 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
743 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
744 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
746 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
747 PUSHL(ssp
, esp
, sp_mask
, ESP
);
749 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
750 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
751 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
752 if (has_error_code
) {
753 PUSHL(ssp
, esp
, sp_mask
, error_code
);
757 if (env
->eflags
& VM_MASK
) {
758 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
759 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
760 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
761 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
763 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
764 PUSHW(ssp
, esp
, sp_mask
, ESP
);
766 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
767 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
768 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
769 if (has_error_code
) {
770 PUSHW(ssp
, esp
, sp_mask
, error_code
);
775 if (env
->eflags
& VM_MASK
) {
776 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
777 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
778 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
779 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
781 ss
= (ss
& ~3) | dpl
;
782 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
783 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
785 SET_ESP(esp
, sp_mask
);
787 selector
= (selector
& ~3) | dpl
;
788 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
789 get_seg_base(e1
, e2
),
790 get_seg_limit(e1
, e2
),
792 cpu_x86_set_cpl(env
, dpl
);
795 /* interrupt gate clear IF mask */
796 if ((type
& 1) == 0) {
797 env
->eflags
&= ~IF_MASK
;
799 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
/* 64-bit stack push/pop helpers for long-mode interrupt handling; 'sp'
   is an lvalue updated in place.  NOTE(review): the sp adjustment lines
   were lost in extraction and restored from upstream QEMU — verify. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
816 static inline target_ulong
get_rsp_from_tss(int level
)
821 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
822 env
->tr
.base
, env
->tr
.limit
);
825 if (!(env
->tr
.flags
& DESC_P_MASK
))
826 cpu_abort(env
, "invalid tss");
827 index
= 8 * level
+ 4;
828 if ((index
+ 7) > env
->tr
.limit
)
829 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
830 return ldq_kernel(env
->tr
.base
+ index
);
833 /* 64 bit interrupt */
834 static void do_interrupt64(int intno
, int is_int
, int error_code
,
835 target_ulong next_eip
, int is_hw
)
839 int type
, dpl
, selector
, cpl
, ist
;
840 int has_error_code
, new_stack
;
841 uint32_t e1
, e2
, e3
, ss
;
842 target_ulong old_eip
, esp
, offset
;
843 int svm_should_check
= 1;
845 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
847 svm_should_check
= 0;
850 && INTERCEPTEDl(_exceptions
, 1 << intno
)
852 raise_interrupt(intno
, is_int
, error_code
, 0);
855 if (!is_int
&& !is_hw
) {
874 if (intno
* 16 + 15 > dt
->limit
)
875 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
876 ptr
= dt
->base
+ intno
* 16;
877 e1
= ldl_kernel(ptr
);
878 e2
= ldl_kernel(ptr
+ 4);
879 e3
= ldl_kernel(ptr
+ 8);
880 /* check gate type */
881 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
883 case 14: /* 386 interrupt gate */
884 case 15: /* 386 trap gate */
887 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
890 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
891 cpl
= env
->hflags
& HF_CPL_MASK
;
892 /* check privledge if software int */
893 if (is_int
&& dpl
< cpl
)
894 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
895 /* check valid bit */
896 if (!(e2
& DESC_P_MASK
))
897 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
899 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
901 if ((selector
& 0xfffc) == 0)
902 raise_exception_err(EXCP0D_GPF
, 0);
904 if (load_segment(&e1
, &e2
, selector
) != 0)
905 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
906 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
907 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
908 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
910 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
911 if (!(e2
& DESC_P_MASK
))
912 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
913 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
914 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
915 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
916 /* to inner privilege */
918 esp
= get_rsp_from_tss(ist
+ 3);
920 esp
= get_rsp_from_tss(dpl
);
921 esp
&= ~0xfLL
; /* align stack */
924 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
925 /* to same privilege */
926 if (env
->eflags
& VM_MASK
)
927 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
930 esp
= get_rsp_from_tss(ist
+ 3);
933 esp
&= ~0xfLL
; /* align stack */
936 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
937 new_stack
= 0; /* avoid warning */
938 esp
= 0; /* avoid warning */
941 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
943 PUSHQ(esp
, compute_eflags());
944 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
946 if (has_error_code
) {
947 PUSHQ(esp
, error_code
);
952 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
956 selector
= (selector
& ~3) | dpl
;
957 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
958 get_seg_base(e1
, e2
),
959 get_seg_limit(e1
, e2
),
961 cpu_x86_set_cpl(env
, dpl
);
964 /* interrupt gate clear IF mask */
965 if ((type
& 1) == 0) {
966 env
->eflags
&= ~IF_MASK
;
968 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
972 void helper_syscall(int next_eip_addend
)
976 if (!(env
->efer
& MSR_EFER_SCE
)) {
977 raise_exception_err(EXCP06_ILLOP
, 0);
979 selector
= (env
->star
>> 32) & 0xffff;
981 if (env
->hflags
& HF_LMA_MASK
) {
984 ECX
= env
->eip
+ next_eip_addend
;
985 env
->regs
[11] = compute_eflags();
987 code64
= env
->hflags
& HF_CS64_MASK
;
989 cpu_x86_set_cpl(env
, 0);
990 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
992 DESC_G_MASK
| DESC_P_MASK
|
994 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
995 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
997 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
999 DESC_W_MASK
| DESC_A_MASK
);
1000 env
->eflags
&= ~env
->fmask
;
1002 env
->eip
= env
->lstar
;
1004 env
->eip
= env
->cstar
;
1008 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1010 cpu_x86_set_cpl(env
, 0);
1011 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1013 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1015 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1016 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1018 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1020 DESC_W_MASK
| DESC_A_MASK
);
1021 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1022 env
->eip
= (uint32_t)env
->star
;
1026 void helper_sysret(int dflag
)
1030 if (!(env
->efer
& MSR_EFER_SCE
)) {
1031 raise_exception_err(EXCP06_ILLOP
, 0);
1033 cpl
= env
->hflags
& HF_CPL_MASK
;
1034 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1035 raise_exception_err(EXCP0D_GPF
, 0);
1037 selector
= (env
->star
>> 48) & 0xffff;
1038 #ifdef TARGET_X86_64
1039 if (env
->hflags
& HF_LMA_MASK
) {
1041 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1043 DESC_G_MASK
| DESC_P_MASK
|
1044 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1045 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1049 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1051 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1052 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1053 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1054 env
->eip
= (uint32_t)ECX
;
1056 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1058 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1059 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1060 DESC_W_MASK
| DESC_A_MASK
);
1061 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1062 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1063 cpu_x86_set_cpl(env
, 3);
1067 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1069 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1070 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1071 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1072 env
->eip
= (uint32_t)ECX
;
1073 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1075 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1076 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1077 DESC_W_MASK
| DESC_A_MASK
);
1078 env
->eflags
|= IF_MASK
;
1079 cpu_x86_set_cpl(env
, 3);
1082 if (kqemu_is_ok(env
)) {
1083 if (env
->hflags
& HF_LMA_MASK
)
1084 CC_OP
= CC_OP_EFLAGS
;
1085 env
->exception_index
= -1;
1091 /* real mode interrupt */
1092 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1093 unsigned int next_eip
)
1096 target_ulong ptr
, ssp
;
1098 uint32_t offset
, esp
;
1099 uint32_t old_cs
, old_eip
;
1100 int svm_should_check
= 1;
1102 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
1104 svm_should_check
= 0;
1106 if (svm_should_check
1107 && INTERCEPTEDl(_exceptions
, 1 << intno
)
1109 raise_interrupt(intno
, is_int
, error_code
, 0);
1111 /* real mode (simpler !) */
1113 if (intno
* 4 + 3 > dt
->limit
)
1114 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1115 ptr
= dt
->base
+ intno
* 4;
1116 offset
= lduw_kernel(ptr
);
1117 selector
= lduw_kernel(ptr
+ 2);
1119 ssp
= env
->segs
[R_SS
].base
;
1124 old_cs
= env
->segs
[R_CS
].selector
;
1125 /* XXX: use SS segment size ? */
1126 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1127 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1128 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1130 /* update processor state */
1131 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1133 env
->segs
[R_CS
].selector
= selector
;
1134 env
->segs
[R_CS
].base
= (selector
<< 4);
1135 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1138 /* fake user mode interrupt */
1139 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1140 target_ulong next_eip
)
1148 ptr
= dt
->base
+ (intno
* 8);
1149 e2
= ldl_kernel(ptr
+ 4);
1151 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1152 cpl
= env
->hflags
& HF_CPL_MASK
;
1153 /* check privledge if software int */
1154 if (is_int
&& dpl
< cpl
)
1155 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1157 /* Since we emulate only user space, we cannot do more than
1158 exiting the emulation with the suitable exception and error
1165 * Begin execution of an interruption. is_int is TRUE if coming from
1166 * the int instruction. next_eip is the EIP value AFTER the interrupt
1167 * instruction. It is only relevant if is_int is TRUE.
1169 void do_interrupt(int intno
, int is_int
, int error_code
,
1170 target_ulong next_eip
, int is_hw
)
1172 if (loglevel
& CPU_LOG_INT
) {
1173 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1175 fprintf(logfile
, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1176 count
, intno
, error_code
, is_int
,
1177 env
->hflags
& HF_CPL_MASK
,
1178 env
->segs
[R_CS
].selector
, EIP
,
1179 (int)env
->segs
[R_CS
].base
+ EIP
,
1180 env
->segs
[R_SS
].selector
, ESP
);
1181 if (intno
== 0x0e) {
1182 fprintf(logfile
, " CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1184 fprintf(logfile
, " EAX=" TARGET_FMT_lx
, EAX
);
1186 fprintf(logfile
, "\n");
1187 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1192 fprintf(logfile
, " code=");
1193 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1194 for(i
= 0; i
< 16; i
++) {
1195 fprintf(logfile
, " %02x", ldub(ptr
+ i
));
1197 fprintf(logfile
, "\n");
1203 if (env
->cr
[0] & CR0_PE_MASK
) {
1205 if (env
->hflags
& HF_LMA_MASK
) {
1206 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1210 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1213 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1218 * Check nested exceptions and change to double or triple fault if
1219 * needed. It should only be called, if this is not an interrupt.
1220 * Returns the new exception number.
1222 int check_exception(int intno
, int *error_code
)
1224 char first_contributory
= env
->old_exception
== 0 ||
1225 (env
->old_exception
>= 10 &&
1226 env
->old_exception
<= 13);
1227 char second_contributory
= intno
== 0 ||
1228 (intno
>= 10 && intno
<= 13);
1230 if (loglevel
& CPU_LOG_INT
)
1231 fprintf(logfile
, "check_exception old: %x new %x\n",
1232 env
->old_exception
, intno
);
1234 if (env
->old_exception
== EXCP08_DBLE
)
1235 cpu_abort(env
, "triple fault");
1237 if ((first_contributory
&& second_contributory
)
1238 || (env
->old_exception
== EXCP0E_PAGE
&&
1239 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1240 intno
= EXCP08_DBLE
;
1244 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1245 (intno
== EXCP08_DBLE
))
1246 env
->old_exception
= intno
;
1252 * Signal an interruption. It is executed in the main CPU loop.
1253 * is_int is TRUE if coming from the int instruction. next_eip is the
1254 * EIP value AFTER the interrupt instruction. It is only relevant if
1257 void raise_interrupt(int intno
, int is_int
, int error_code
,
1258 int next_eip_addend
)
1261 svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1262 intno
= check_exception(intno
, &error_code
);
1265 env
->exception_index
= intno
;
1266 env
->error_code
= error_code
;
1267 env
->exception_is_int
= is_int
;
1268 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1272 /* same as raise_exception_err, but do not restore global registers */
1273 static void raise_exception_err_norestore(int exception_index
, int error_code
)
1275 exception_index
= check_exception(exception_index
, &error_code
);
1277 env
->exception_index
= exception_index
;
1278 env
->error_code
= error_code
;
1279 env
->exception_is_int
= 0;
1280 env
->exception_next_eip
= 0;
1281 longjmp(env
->jmp_env
, 1);
/* shortcuts to generate exceptions */

/*
 * Out-of-line definition of raise_exception_err.  The parentheses
 * around the function name suppress expansion of the debug wrapper
 * macro of the same name defined at the top of this file.
 */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Raise an exception that carries no error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1298 #if defined(CONFIG_USER_ONLY)
/* SMM is meaningless in user-mode emulation: no-op stubs. */
void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}
1310 #ifdef TARGET_X86_64
1311 #define SMM_REVISION_ID 0x00020064
1313 #define SMM_REVISION_ID 0x00020000
1316 void do_smm_enter(void)
1318 target_ulong sm_state
;
1322 if (loglevel
& CPU_LOG_INT
) {
1323 fprintf(logfile
, "SMM: enter\n");
1324 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1327 env
->hflags
|= HF_SMM_MASK
;
1328 cpu_smm_update(env
);
1330 sm_state
= env
->smbase
+ 0x8000;
1332 #ifdef TARGET_X86_64
1333 for(i
= 0; i
< 6; i
++) {
1335 offset
= 0x7e00 + i
* 16;
1336 stw_phys(sm_state
+ offset
, dt
->selector
);
1337 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1338 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1339 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1342 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1343 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1345 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1346 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1347 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1348 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1350 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1351 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1353 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1354 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1355 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1356 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1358 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1360 stq_phys(sm_state
+ 0x7ff8, EAX
);
1361 stq_phys(sm_state
+ 0x7ff0, ECX
);
1362 stq_phys(sm_state
+ 0x7fe8, EDX
);
1363 stq_phys(sm_state
+ 0x7fe0, EBX
);
1364 stq_phys(sm_state
+ 0x7fd8, ESP
);
1365 stq_phys(sm_state
+ 0x7fd0, EBP
);
1366 stq_phys(sm_state
+ 0x7fc8, ESI
);
1367 stq_phys(sm_state
+ 0x7fc0, EDI
);
1368 for(i
= 8; i
< 16; i
++)
1369 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1370 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1371 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1372 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1373 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1375 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1376 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1377 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1379 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1380 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1382 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1383 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1384 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1385 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1386 stl_phys(sm_state
+ 0x7fec, EDI
);
1387 stl_phys(sm_state
+ 0x7fe8, ESI
);
1388 stl_phys(sm_state
+ 0x7fe4, EBP
);
1389 stl_phys(sm_state
+ 0x7fe0, ESP
);
1390 stl_phys(sm_state
+ 0x7fdc, EBX
);
1391 stl_phys(sm_state
+ 0x7fd8, EDX
);
1392 stl_phys(sm_state
+ 0x7fd4, ECX
);
1393 stl_phys(sm_state
+ 0x7fd0, EAX
);
1394 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1395 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1397 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1398 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1399 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1400 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1402 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1403 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1404 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1405 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1407 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1408 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1410 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1411 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1413 for(i
= 0; i
< 6; i
++) {
1416 offset
= 0x7f84 + i
* 12;
1418 offset
= 0x7f2c + (i
- 3) * 12;
1419 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1420 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1421 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1422 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1424 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1426 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1427 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1429 /* init SMM cpu state */
1431 #ifdef TARGET_X86_64
1433 env
->hflags
&= ~HF_LMA_MASK
;
1435 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1436 env
->eip
= 0x00008000;
1437 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1439 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1440 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1441 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1442 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1443 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1445 cpu_x86_update_cr0(env
,
1446 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1447 cpu_x86_update_cr4(env
, 0);
1448 env
->dr
[7] = 0x00000400;
1449 CC_OP
= CC_OP_EFLAGS
;
1452 void helper_rsm(void)
1454 target_ulong sm_state
;
1458 sm_state
= env
->smbase
+ 0x8000;
1459 #ifdef TARGET_X86_64
1460 env
->efer
= ldq_phys(sm_state
+ 0x7ed0);
1461 if (env
->efer
& MSR_EFER_LMA
)
1462 env
->hflags
|= HF_LMA_MASK
;
1464 env
->hflags
&= ~HF_LMA_MASK
;
1466 for(i
= 0; i
< 6; i
++) {
1467 offset
= 0x7e00 + i
* 16;
1468 cpu_x86_load_seg_cache(env
, i
,
1469 lduw_phys(sm_state
+ offset
),
1470 ldq_phys(sm_state
+ offset
+ 8),
1471 ldl_phys(sm_state
+ offset
+ 4),
1472 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1475 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1476 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1478 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1479 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1480 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1481 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1483 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1484 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1486 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1487 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1488 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1489 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1491 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1492 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1493 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1494 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1495 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1496 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1497 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1498 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1499 for(i
= 8; i
< 16; i
++)
1500 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1501 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1502 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1503 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1504 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1505 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1507 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1508 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1509 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1511 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1512 if (val
& 0x20000) {
1513 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1516 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1517 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1518 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1519 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1520 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1521 EDI
= ldl_phys(sm_state
+ 0x7fec);
1522 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1523 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1524 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1525 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1526 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1527 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1528 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1529 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1530 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1532 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1533 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1534 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1535 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1537 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1538 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1539 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1540 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1542 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1543 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1545 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1546 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1548 for(i
= 0; i
< 6; i
++) {
1550 offset
= 0x7f84 + i
* 12;
1552 offset
= 0x7f2c + (i
- 3) * 12;
1553 cpu_x86_load_seg_cache(env
, i
,
1554 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1555 ldl_phys(sm_state
+ offset
+ 8),
1556 ldl_phys(sm_state
+ offset
+ 4),
1557 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1559 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1561 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1562 if (val
& 0x20000) {
1563 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1566 CC_OP
= CC_OP_EFLAGS
;
1567 env
->hflags
&= ~HF_SMM_MASK
;
1568 cpu_smm_update(env
);
1570 if (loglevel
& CPU_LOG_INT
) {
1571 fprintf(logfile
, "SMM: after RSM\n");
1572 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1576 #endif /* !CONFIG_USER_ONLY */
1579 #ifdef BUGGY_GCC_DIV64
1580 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1581 call it from another function */
/* 64/32-bit unsigned divide: quotient via *q_ptr, remainder returned.
   Kept out of line to work around a gcc 2.95.4 __udivdi3 issue on
   PowerPC (see BUGGY_GCC_DIV64 above). */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

/* Signed counterpart of div32 (C truncating division semantics). */
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
1595 void helper_divl_EAX_T0(void)
1597 unsigned int den
, r
;
1600 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1603 raise_exception(EXCP00_DIVZ
);
1605 #ifdef BUGGY_GCC_DIV64
1606 r
= div32(&q
, num
, den
);
1612 raise_exception(EXCP00_DIVZ
);
1617 void helper_idivl_EAX_T0(void)
1622 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1625 raise_exception(EXCP00_DIVZ
);
1627 #ifdef BUGGY_GCC_DIV64
1628 r
= idiv32(&q
, num
, den
);
1633 if (q
!= (int32_t)q
)
1634 raise_exception(EXCP00_DIVZ
);
1639 void helper_cmpxchg8b(void)
1644 eflags
= cc_table
[CC_OP
].compute_all();
1646 if (d
== (((uint64_t)EDX
<< 32) | EAX
)) {
1647 stq(A0
, ((uint64_t)ECX
<< 32) | EBX
);
1657 void helper_single_step()
1659 env
->dr
[6] |= 0x4000;
1660 raise_exception(EXCP01_SSTP
);
1663 void helper_cpuid(void)
1666 index
= (uint32_t)EAX
;
1668 /* test if maximum index reached */
1669 if (index
& 0x80000000) {
1670 if (index
> env
->cpuid_xlevel
)
1671 index
= env
->cpuid_level
;
1673 if (index
> env
->cpuid_level
)
1674 index
= env
->cpuid_level
;
1679 EAX
= env
->cpuid_level
;
1680 EBX
= env
->cpuid_vendor1
;
1681 EDX
= env
->cpuid_vendor2
;
1682 ECX
= env
->cpuid_vendor3
;
1685 EAX
= env
->cpuid_version
;
1686 EBX
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1687 ECX
= env
->cpuid_ext_features
;
1688 EDX
= env
->cpuid_features
;
1691 /* cache info: needed for Pentium Pro compatibility */
1698 EAX
= env
->cpuid_xlevel
;
1699 EBX
= env
->cpuid_vendor1
;
1700 EDX
= env
->cpuid_vendor2
;
1701 ECX
= env
->cpuid_vendor3
;
1704 EAX
= env
->cpuid_features
;
1706 ECX
= env
->cpuid_ext3_features
;
1707 EDX
= env
->cpuid_ext2_features
;
1712 EAX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1713 EBX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1714 ECX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1715 EDX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1718 /* cache info (L1 cache) */
1725 /* cache info (L2 cache) */
1732 /* virtual & phys address size in low 2 bytes. */
1739 /* reserved values: zero */
1748 void helper_enter_level(int level
, int data32
)
1751 uint32_t esp_mask
, esp
, ebp
;
1753 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1754 ssp
= env
->segs
[R_SS
].base
;
1763 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1766 stl(ssp
+ (esp
& esp_mask
), T1
);
1773 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1776 stw(ssp
+ (esp
& esp_mask
), T1
);
1780 #ifdef TARGET_X86_64
1781 void helper_enter64_level(int level
, int data64
)
1783 target_ulong esp
, ebp
;
1803 stw(esp
, lduw(ebp
));
1811 void helper_lldt_T0(void)
1816 int index
, entry_limit
;
1819 selector
= T0
& 0xffff;
1820 if ((selector
& 0xfffc) == 0) {
1821 /* XXX: NULL selector case: invalid LDT */
1826 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1828 index
= selector
& ~7;
1829 #ifdef TARGET_X86_64
1830 if (env
->hflags
& HF_LMA_MASK
)
1835 if ((index
+ entry_limit
) > dt
->limit
)
1836 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1837 ptr
= dt
->base
+ index
;
1838 e1
= ldl_kernel(ptr
);
1839 e2
= ldl_kernel(ptr
+ 4);
1840 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
1841 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1842 if (!(e2
& DESC_P_MASK
))
1843 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1844 #ifdef TARGET_X86_64
1845 if (env
->hflags
& HF_LMA_MASK
) {
1847 e3
= ldl_kernel(ptr
+ 8);
1848 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1849 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1853 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1856 env
->ldt
.selector
= selector
;
1859 void helper_ltr_T0(void)
1864 int index
, type
, entry_limit
;
1867 selector
= T0
& 0xffff;
1868 if ((selector
& 0xfffc) == 0) {
1869 /* NULL selector case: invalid TR */
1875 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1877 index
= selector
& ~7;
1878 #ifdef TARGET_X86_64
1879 if (env
->hflags
& HF_LMA_MASK
)
1884 if ((index
+ entry_limit
) > dt
->limit
)
1885 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1886 ptr
= dt
->base
+ index
;
1887 e1
= ldl_kernel(ptr
);
1888 e2
= ldl_kernel(ptr
+ 4);
1889 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1890 if ((e2
& DESC_S_MASK
) ||
1891 (type
!= 1 && type
!= 9))
1892 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1893 if (!(e2
& DESC_P_MASK
))
1894 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1895 #ifdef TARGET_X86_64
1896 if (env
->hflags
& HF_LMA_MASK
) {
1898 e3
= ldl_kernel(ptr
+ 8);
1899 e4
= ldl_kernel(ptr
+ 12);
1900 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
1901 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1902 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1903 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1907 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1909 e2
|= DESC_TSS_BUSY_MASK
;
1910 stl_kernel(ptr
+ 4, e2
);
1912 env
->tr
.selector
= selector
;
1915 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1916 void load_seg(int seg_reg
, int selector
)
1925 cpl
= env
->hflags
& HF_CPL_MASK
;
1926 if ((selector
& 0xfffc) == 0) {
1927 /* null selector case */
1929 #ifdef TARGET_X86_64
1930 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1933 raise_exception_err(EXCP0D_GPF
, 0);
1934 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1941 index
= selector
& ~7;
1942 if ((index
+ 7) > dt
->limit
)
1943 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1944 ptr
= dt
->base
+ index
;
1945 e1
= ldl_kernel(ptr
);
1946 e2
= ldl_kernel(ptr
+ 4);
1948 if (!(e2
& DESC_S_MASK
))
1949 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1951 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1952 if (seg_reg
== R_SS
) {
1953 /* must be writable segment */
1954 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
1955 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1956 if (rpl
!= cpl
|| dpl
!= cpl
)
1957 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1959 /* must be readable segment */
1960 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
1961 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1963 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1964 /* if not conforming code, test rights */
1965 if (dpl
< cpl
|| dpl
< rpl
)
1966 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1970 if (!(e2
& DESC_P_MASK
)) {
1971 if (seg_reg
== R_SS
)
1972 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
1974 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1977 /* set the access bit if not already set */
1978 if (!(e2
& DESC_A_MASK
)) {
1980 stl_kernel(ptr
+ 4, e2
);
1983 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1984 get_seg_base(e1
, e2
),
1985 get_seg_limit(e1
, e2
),
1988 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1989 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1994 /* protected mode jump */
1995 void helper_ljmp_protected_T0_T1(int next_eip_addend
)
1997 int new_cs
, gate_cs
, type
;
1998 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1999 target_ulong new_eip
, next_eip
;
2003 if ((new_cs
& 0xfffc) == 0)
2004 raise_exception_err(EXCP0D_GPF
, 0);
2005 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2006 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2007 cpl
= env
->hflags
& HF_CPL_MASK
;
2008 if (e2
& DESC_S_MASK
) {
2009 if (!(e2
& DESC_CS_MASK
))
2010 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2011 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2012 if (e2
& DESC_C_MASK
) {
2013 /* conforming code segment */
2015 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2017 /* non conforming code segment */
2020 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2022 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2024 if (!(e2
& DESC_P_MASK
))
2025 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2026 limit
= get_seg_limit(e1
, e2
);
2027 if (new_eip
> limit
&&
2028 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2029 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2030 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2031 get_seg_base(e1
, e2
), limit
, e2
);
2034 /* jump to call or task gate */
2035 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2037 cpl
= env
->hflags
& HF_CPL_MASK
;
2038 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2040 case 1: /* 286 TSS */
2041 case 9: /* 386 TSS */
2042 case 5: /* task gate */
2043 if (dpl
< cpl
|| dpl
< rpl
)
2044 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2045 next_eip
= env
->eip
+ next_eip_addend
;
2046 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2047 CC_OP
= CC_OP_EFLAGS
;
2049 case 4: /* 286 call gate */
2050 case 12: /* 386 call gate */
2051 if ((dpl
< cpl
) || (dpl
< rpl
))
2052 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2053 if (!(e2
& DESC_P_MASK
))
2054 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2056 new_eip
= (e1
& 0xffff);
2058 new_eip
|= (e2
& 0xffff0000);
2059 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2060 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2061 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2062 /* must be code segment */
2063 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2064 (DESC_S_MASK
| DESC_CS_MASK
)))
2065 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2066 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2067 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2068 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2069 if (!(e2
& DESC_P_MASK
))
2070 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2071 limit
= get_seg_limit(e1
, e2
);
2072 if (new_eip
> limit
)
2073 raise_exception_err(EXCP0D_GPF
, 0);
2074 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2075 get_seg_base(e1
, e2
), limit
, e2
);
2079 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2085 /* real mode call */
2086 void helper_lcall_real_T0_T1(int shift
, int next_eip
)
2088 int new_cs
, new_eip
;
2089 uint32_t esp
, esp_mask
;
2095 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2096 ssp
= env
->segs
[R_SS
].base
;
2098 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2099 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2101 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2102 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2105 SET_ESP(esp
, esp_mask
);
2107 env
->segs
[R_CS
].selector
= new_cs
;
2108 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2111 /* protected mode call */
2112 void helper_lcall_protected_T0_T1(int shift
, int next_eip_addend
)
2114 int new_cs
, new_stack
, i
;
2115 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2116 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2117 uint32_t val
, limit
, old_sp_mask
;
2118 target_ulong ssp
, old_ssp
, next_eip
, new_eip
;
2122 next_eip
= env
->eip
+ next_eip_addend
;
2124 if (loglevel
& CPU_LOG_PCALL
) {
2125 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2126 new_cs
, (uint32_t)new_eip
, shift
);
2127 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2130 if ((new_cs
& 0xfffc) == 0)
2131 raise_exception_err(EXCP0D_GPF
, 0);
2132 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2133 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2134 cpl
= env
->hflags
& HF_CPL_MASK
;
2136 if (loglevel
& CPU_LOG_PCALL
) {
2137 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2140 if (e2
& DESC_S_MASK
) {
2141 if (!(e2
& DESC_CS_MASK
))
2142 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2143 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2144 if (e2
& DESC_C_MASK
) {
2145 /* conforming code segment */
2147 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2149 /* non conforming code segment */
2152 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2154 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2156 if (!(e2
& DESC_P_MASK
))
2157 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2159 #ifdef TARGET_X86_64
2160 /* XXX: check 16/32 bit cases in long mode */
2165 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2166 PUSHQ(rsp
, next_eip
);
2167 /* from this point, not restartable */
2169 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2170 get_seg_base(e1
, e2
),
2171 get_seg_limit(e1
, e2
), e2
);
2177 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2178 ssp
= env
->segs
[R_SS
].base
;
2180 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2181 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2183 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2184 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2187 limit
= get_seg_limit(e1
, e2
);
2188 if (new_eip
> limit
)
2189 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2190 /* from this point, not restartable */
2191 SET_ESP(sp
, sp_mask
);
2192 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2193 get_seg_base(e1
, e2
), limit
, e2
);
2197 /* check gate type */
2198 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2199 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2202 case 1: /* available 286 TSS */
2203 case 9: /* available 386 TSS */
2204 case 5: /* task gate */
2205 if (dpl
< cpl
|| dpl
< rpl
)
2206 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2207 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2208 CC_OP
= CC_OP_EFLAGS
;
2210 case 4: /* 286 call gate */
2211 case 12: /* 386 call gate */
2214 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2219 if (dpl
< cpl
|| dpl
< rpl
)
2220 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2221 /* check valid bit */
2222 if (!(e2
& DESC_P_MASK
))
2223 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2224 selector
= e1
>> 16;
2225 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2226 param_count
= e2
& 0x1f;
2227 if ((selector
& 0xfffc) == 0)
2228 raise_exception_err(EXCP0D_GPF
, 0);
2230 if (load_segment(&e1
, &e2
, selector
) != 0)
2231 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2232 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2233 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2234 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2236 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2237 if (!(e2
& DESC_P_MASK
))
2238 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2240 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2241 /* to inner privilege */
2242 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2244 if (loglevel
& CPU_LOG_PCALL
)
2245 fprintf(logfile
, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2246 ss
, sp
, param_count
, ESP
);
2248 if ((ss
& 0xfffc) == 0)
2249 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2250 if ((ss
& 3) != dpl
)
2251 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2252 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2253 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2254 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2256 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2257 if (!(ss_e2
& DESC_S_MASK
) ||
2258 (ss_e2
& DESC_CS_MASK
) ||
2259 !(ss_e2
& DESC_W_MASK
))
2260 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2261 if (!(ss_e2
& DESC_P_MASK
))
2262 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2264 // push_size = ((param_count * 2) + 8) << shift;
2266 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2267 old_ssp
= env
->segs
[R_SS
].base
;
2269 sp_mask
= get_sp_mask(ss_e2
);
2270 ssp
= get_seg_base(ss_e1
, ss_e2
);
2272 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2273 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2274 for(i
= param_count
- 1; i
>= 0; i
--) {
2275 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2276 PUSHL(ssp
, sp
, sp_mask
, val
);
2279 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2280 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2281 for(i
= param_count
- 1; i
>= 0; i
--) {
2282 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2283 PUSHW(ssp
, sp
, sp_mask
, val
);
2288 /* to same privilege */
2290 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2291 ssp
= env
->segs
[R_SS
].base
;
2292 // push_size = (4 << shift);
2297 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2298 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2300 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2301 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2304 /* from this point, not restartable */
2307 ss
= (ss
& ~3) | dpl
;
2308 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2310 get_seg_limit(ss_e1
, ss_e2
),
2314 selector
= (selector
& ~3) | dpl
;
2315 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2316 get_seg_base(e1
, e2
),
2317 get_seg_limit(e1
, e2
),
2319 cpu_x86_set_cpl(env
, dpl
);
2320 SET_ESP(sp
, sp_mask
);
2324 if (kqemu_is_ok(env
)) {
2325 env
->exception_index
= -1;
2331 /* real and vm86 mode iret */
2332 void helper_iret_real(int shift
)
2334 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2338 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2340 ssp
= env
->segs
[R_SS
].base
;
2343 POPL(ssp
, sp
, sp_mask
, new_eip
);
2344 POPL(ssp
, sp
, sp_mask
, new_cs
);
2346 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2349 POPW(ssp
, sp
, sp_mask
, new_eip
);
2350 POPW(ssp
, sp
, sp_mask
, new_cs
);
2351 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2353 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2354 load_seg_vm(R_CS
, new_cs
);
2356 if (env
->eflags
& VM_MASK
)
2357 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2359 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2361 eflags_mask
&= 0xffff;
2362 load_eflags(new_eflags
, eflags_mask
);
2365 static inline void validate_seg(int seg_reg
, int cpl
)
2370 /* XXX: on x86_64, we do not want to nullify FS and GS because
2371 they may still contain a valid base. I would be interested to
2372 know how a real x86_64 CPU behaves */
2373 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2374 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2377 e2
= env
->segs
[seg_reg
].flags
;
2378 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2379 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2380 /* data or non conforming code segment */
2382 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2387 /* protected mode iret */
2388 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2390 uint32_t new_cs
, new_eflags
, new_ss
;
2391 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2392 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2393 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2394 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2396 #ifdef TARGET_X86_64
2401 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2403 ssp
= env
->segs
[R_SS
].base
;
2404 new_eflags
= 0; /* avoid warning */
2405 #ifdef TARGET_X86_64
2411 POPQ(sp
, new_eflags
);
2417 POPL(ssp
, sp
, sp_mask
, new_eip
);
2418 POPL(ssp
, sp
, sp_mask
, new_cs
);
2421 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2422 if (new_eflags
& VM_MASK
)
2423 goto return_to_vm86
;
2427 POPW(ssp
, sp
, sp_mask
, new_eip
);
2428 POPW(ssp
, sp
, sp_mask
, new_cs
);
2430 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2433 if (loglevel
& CPU_LOG_PCALL
) {
2434 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2435 new_cs
, new_eip
, shift
, addend
);
2436 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2439 if ((new_cs
& 0xfffc) == 0)
2440 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2441 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2442 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2443 if (!(e2
& DESC_S_MASK
) ||
2444 !(e2
& DESC_CS_MASK
))
2445 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2446 cpl
= env
->hflags
& HF_CPL_MASK
;
2449 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2450 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2451 if (e2
& DESC_C_MASK
) {
2453 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2456 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2458 if (!(e2
& DESC_P_MASK
))
2459 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2462 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2463 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2464 /* return to same priledge level */
2465 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2466 get_seg_base(e1
, e2
),
2467 get_seg_limit(e1
, e2
),
2470 /* return to different privilege level */
2471 #ifdef TARGET_X86_64
2480 POPL(ssp
, sp
, sp_mask
, new_esp
);
2481 POPL(ssp
, sp
, sp_mask
, new_ss
);
2485 POPW(ssp
, sp
, sp_mask
, new_esp
);
2486 POPW(ssp
, sp
, sp_mask
, new_ss
);
2489 if (loglevel
& CPU_LOG_PCALL
) {
2490 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2494 if ((new_ss
& 0xfffc) == 0) {
2495 #ifdef TARGET_X86_64
2496 /* NULL ss is allowed in long mode if cpl != 3*/
2497 /* XXX: test CS64 ? */
2498 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2499 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2501 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2502 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2503 DESC_W_MASK
| DESC_A_MASK
);
2504 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2508 raise_exception_err(EXCP0D_GPF
, 0);
2511 if ((new_ss
& 3) != rpl
)
2512 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2513 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2514 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2515 if (!(ss_e2
& DESC_S_MASK
) ||
2516 (ss_e2
& DESC_CS_MASK
) ||
2517 !(ss_e2
& DESC_W_MASK
))
2518 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2519 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2521 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2522 if (!(ss_e2
& DESC_P_MASK
))
2523 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2524 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2525 get_seg_base(ss_e1
, ss_e2
),
2526 get_seg_limit(ss_e1
, ss_e2
),
2530 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2531 get_seg_base(e1
, e2
),
2532 get_seg_limit(e1
, e2
),
2534 cpu_x86_set_cpl(env
, rpl
);
2536 #ifdef TARGET_X86_64
2537 if (env
->hflags
& HF_CS64_MASK
)
2541 sp_mask
= get_sp_mask(ss_e2
);
2543 /* validate data segments */
2544 validate_seg(R_ES
, rpl
);
2545 validate_seg(R_DS
, rpl
);
2546 validate_seg(R_FS
, rpl
);
2547 validate_seg(R_GS
, rpl
);
2551 SET_ESP(sp
, sp_mask
);
2554 /* NOTE: 'cpl' is the _old_ CPL */
2555 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2557 eflags_mask
|= IOPL_MASK
;
2558 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2560 eflags_mask
|= IF_MASK
;
2562 eflags_mask
&= 0xffff;
2563 load_eflags(new_eflags
, eflags_mask
);
2568 POPL(ssp
, sp
, sp_mask
, new_esp
);
2569 POPL(ssp
, sp
, sp_mask
, new_ss
);
2570 POPL(ssp
, sp
, sp_mask
, new_es
);
2571 POPL(ssp
, sp
, sp_mask
, new_ds
);
2572 POPL(ssp
, sp
, sp_mask
, new_fs
);
2573 POPL(ssp
, sp
, sp_mask
, new_gs
);
2575 /* modify processor state */
2576 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2577 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2578 load_seg_vm(R_CS
, new_cs
& 0xffff);
2579 cpu_x86_set_cpl(env
, 3);
2580 load_seg_vm(R_SS
, new_ss
& 0xffff);
2581 load_seg_vm(R_ES
, new_es
& 0xffff);
2582 load_seg_vm(R_DS
, new_ds
& 0xffff);
2583 load_seg_vm(R_FS
, new_fs
& 0xffff);
2584 load_seg_vm(R_GS
, new_gs
& 0xffff);
2586 env
->eip
= new_eip
& 0xffff;
2590 void helper_iret_protected(int shift
, int next_eip
)
2592 int tss_selector
, type
;
2595 /* specific case for TSS */
2596 if (env
->eflags
& NT_MASK
) {
2597 #ifdef TARGET_X86_64
2598 if (env
->hflags
& HF_LMA_MASK
)
2599 raise_exception_err(EXCP0D_GPF
, 0);
2601 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2602 if (tss_selector
& 4)
2603 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2604 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2605 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2606 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2607 /* NOTE: we check both segment and busy TSS */
2609 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2610 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2612 helper_ret_protected(shift
, 1, 0);
2615 if (kqemu_is_ok(env
)) {
2616 CC_OP
= CC_OP_EFLAGS
;
2617 env
->exception_index
= -1;
2623 void helper_lret_protected(int shift
, int addend
)
2625 helper_ret_protected(shift
, 0, addend
);
2627 if (kqemu_is_ok(env
)) {
2628 env
->exception_index
= -1;
2634 void helper_sysenter(void)
2636 if (env
->sysenter_cs
== 0) {
2637 raise_exception_err(EXCP0D_GPF
, 0);
2639 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2640 cpu_x86_set_cpl(env
, 0);
2641 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2643 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2645 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2646 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2648 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2650 DESC_W_MASK
| DESC_A_MASK
);
2651 ESP
= env
->sysenter_esp
;
2652 EIP
= env
->sysenter_eip
;
2655 void helper_sysexit(void)
2659 cpl
= env
->hflags
& HF_CPL_MASK
;
2660 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2661 raise_exception_err(EXCP0D_GPF
, 0);
2663 cpu_x86_set_cpl(env
, 3);
2664 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2666 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2667 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2668 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2669 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2671 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2672 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2673 DESC_W_MASK
| DESC_A_MASK
);
2677 if (kqemu_is_ok(env
)) {
2678 env
->exception_index
= -1;
2684 void helper_movl_crN_T0(int reg
)
2686 #if !defined(CONFIG_USER_ONLY)
2689 cpu_x86_update_cr0(env
, T0
);
2692 cpu_x86_update_cr3(env
, T0
);
2695 cpu_x86_update_cr4(env
, T0
);
2698 cpu_set_apic_tpr(env
, T0
);
2708 void helper_movl_drN_T0(int reg
)
2713 void helper_invlpg(target_ulong addr
)
2715 cpu_x86_flush_tlb(env
, addr
);
2718 void helper_rdtsc(void)
2722 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2723 raise_exception(EXCP0D_GPF
);
2725 val
= cpu_get_tsc(env
);
2726 EAX
= (uint32_t)(val
);
2727 EDX
= (uint32_t)(val
>> 32);
2730 #if defined(CONFIG_USER_ONLY)
2731 void helper_wrmsr(void)
2735 void helper_rdmsr(void)
2739 void helper_wrmsr(void)
2743 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
2745 switch((uint32_t)ECX
) {
2746 case MSR_IA32_SYSENTER_CS
:
2747 env
->sysenter_cs
= val
& 0xffff;
2749 case MSR_IA32_SYSENTER_ESP
:
2750 env
->sysenter_esp
= val
;
2752 case MSR_IA32_SYSENTER_EIP
:
2753 env
->sysenter_eip
= val
;
2755 case MSR_IA32_APICBASE
:
2756 cpu_set_apic_base(env
, val
);
2760 uint64_t update_mask
;
2762 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
2763 update_mask
|= MSR_EFER_SCE
;
2764 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
2765 update_mask
|= MSR_EFER_LME
;
2766 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
2767 update_mask
|= MSR_EFER_FFXSR
;
2768 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
2769 update_mask
|= MSR_EFER_NXE
;
2770 env
->efer
= (env
->efer
& ~update_mask
) |
2771 (val
& update_mask
);
2780 case MSR_VM_HSAVE_PA
:
2781 env
->vm_hsave
= val
;
2783 #ifdef TARGET_X86_64
2794 env
->segs
[R_FS
].base
= val
;
2797 env
->segs
[R_GS
].base
= val
;
2799 case MSR_KERNELGSBASE
:
2800 env
->kernelgsbase
= val
;
2804 /* XXX: exception ? */
2809 void helper_rdmsr(void)
2812 switch((uint32_t)ECX
) {
2813 case MSR_IA32_SYSENTER_CS
:
2814 val
= env
->sysenter_cs
;
2816 case MSR_IA32_SYSENTER_ESP
:
2817 val
= env
->sysenter_esp
;
2819 case MSR_IA32_SYSENTER_EIP
:
2820 val
= env
->sysenter_eip
;
2822 case MSR_IA32_APICBASE
:
2823 val
= cpu_get_apic_base(env
);
2834 case MSR_VM_HSAVE_PA
:
2835 val
= env
->vm_hsave
;
2837 #ifdef TARGET_X86_64
2848 val
= env
->segs
[R_FS
].base
;
2851 val
= env
->segs
[R_GS
].base
;
2853 case MSR_KERNELGSBASE
:
2854 val
= env
->kernelgsbase
;
2858 /* XXX: exception ? */
2862 EAX
= (uint32_t)(val
);
2863 EDX
= (uint32_t)(val
>> 32);
2867 void helper_lsl(void)
2869 unsigned int selector
, limit
;
2870 uint32_t e1
, e2
, eflags
;
2871 int rpl
, dpl
, cpl
, type
;
2873 eflags
= cc_table
[CC_OP
].compute_all();
2874 selector
= T0
& 0xffff;
2875 if (load_segment(&e1
, &e2
, selector
) != 0)
2878 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2879 cpl
= env
->hflags
& HF_CPL_MASK
;
2880 if (e2
& DESC_S_MASK
) {
2881 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2884 if (dpl
< cpl
|| dpl
< rpl
)
2888 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2899 if (dpl
< cpl
|| dpl
< rpl
) {
2901 CC_SRC
= eflags
& ~CC_Z
;
2905 limit
= get_seg_limit(e1
, e2
);
2907 CC_SRC
= eflags
| CC_Z
;
2910 void helper_lar(void)
2912 unsigned int selector
;
2913 uint32_t e1
, e2
, eflags
;
2914 int rpl
, dpl
, cpl
, type
;
2916 eflags
= cc_table
[CC_OP
].compute_all();
2917 selector
= T0
& 0xffff;
2918 if ((selector
& 0xfffc) == 0)
2920 if (load_segment(&e1
, &e2
, selector
) != 0)
2923 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2924 cpl
= env
->hflags
& HF_CPL_MASK
;
2925 if (e2
& DESC_S_MASK
) {
2926 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2929 if (dpl
< cpl
|| dpl
< rpl
)
2933 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2947 if (dpl
< cpl
|| dpl
< rpl
) {
2949 CC_SRC
= eflags
& ~CC_Z
;
2953 T1
= e2
& 0x00f0ff00;
2954 CC_SRC
= eflags
| CC_Z
;
2957 void helper_verr(void)
2959 unsigned int selector
;
2960 uint32_t e1
, e2
, eflags
;
2963 eflags
= cc_table
[CC_OP
].compute_all();
2964 selector
= T0
& 0xffff;
2965 if ((selector
& 0xfffc) == 0)
2967 if (load_segment(&e1
, &e2
, selector
) != 0)
2969 if (!(e2
& DESC_S_MASK
))
2972 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2973 cpl
= env
->hflags
& HF_CPL_MASK
;
2974 if (e2
& DESC_CS_MASK
) {
2975 if (!(e2
& DESC_R_MASK
))
2977 if (!(e2
& DESC_C_MASK
)) {
2978 if (dpl
< cpl
|| dpl
< rpl
)
2982 if (dpl
< cpl
|| dpl
< rpl
) {
2984 CC_SRC
= eflags
& ~CC_Z
;
2988 CC_SRC
= eflags
| CC_Z
;
2991 void helper_verw(void)
2993 unsigned int selector
;
2994 uint32_t e1
, e2
, eflags
;
2997 eflags
= cc_table
[CC_OP
].compute_all();
2998 selector
= T0
& 0xffff;
2999 if ((selector
& 0xfffc) == 0)
3001 if (load_segment(&e1
, &e2
, selector
) != 0)
3003 if (!(e2
& DESC_S_MASK
))
3006 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3007 cpl
= env
->hflags
& HF_CPL_MASK
;
3008 if (e2
& DESC_CS_MASK
) {
3011 if (dpl
< cpl
|| dpl
< rpl
)
3013 if (!(e2
& DESC_W_MASK
)) {
3015 CC_SRC
= eflags
& ~CC_Z
;
3019 CC_SRC
= eflags
| CC_Z
;
3024 void helper_fldt_ST0_A0(void)
3027 new_fpstt
= (env
->fpstt
- 1) & 7;
3028 env
->fpregs
[new_fpstt
].d
= helper_fldt(A0
);
3029 env
->fpstt
= new_fpstt
;
3030 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3033 void helper_fstt_ST0_A0(void)
3035 helper_fstt(ST0
, A0
);
3038 void fpu_set_exception(int mask
)
3041 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3042 env
->fpus
|= FPUS_SE
| FPUS_B
;
3045 CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3048 fpu_set_exception(FPUS_ZE
);
3052 void fpu_raise_exception(void)
3054 if (env
->cr
[0] & CR0_NE_MASK
) {
3055 raise_exception(EXCP10_COPR
);
3057 #if !defined(CONFIG_USER_ONLY)
3066 void helper_fbld_ST0_A0(void)
3074 for(i
= 8; i
>= 0; i
--) {
3076 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3079 if (ldub(A0
+ 9) & 0x80)
3085 void helper_fbst_ST0_A0(void)
3088 target_ulong mem_ref
, mem_end
;
3091 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3093 mem_end
= mem_ref
+ 9;
3100 while (mem_ref
< mem_end
) {
3105 v
= ((v
/ 10) << 4) | (v
% 10);
3108 while (mem_ref
< mem_end
) {
3113 void helper_f2xm1(void)
3115 ST0
= pow(2.0,ST0
) - 1.0;
3118 void helper_fyl2x(void)
3120 CPU86_LDouble fptemp
;
3124 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3128 env
->fpus
&= (~0x4700);
3133 void helper_fptan(void)
3135 CPU86_LDouble fptemp
;
3138 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3144 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3145 /* the above code is for |arg| < 2**52 only */
3149 void helper_fpatan(void)
3151 CPU86_LDouble fptemp
, fpsrcop
;
3155 ST1
= atan2(fpsrcop
,fptemp
);
3159 void helper_fxtract(void)
3161 CPU86_LDoubleU temp
;
3162 unsigned int expdif
;
3165 expdif
= EXPD(temp
) - EXPBIAS
;
3166 /*DP exponent bias*/
3173 void helper_fprem1(void)
3175 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3176 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3178 signed long long int q
;
3180 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3181 ST0
= 0.0 / 0.0; /* NaN */
3182 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3188 fpsrcop1
.d
= fpsrcop
;
3190 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3193 /* optimisation? taken from the AMD docs */
3194 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3195 /* ST0 is unchanged */
3200 dblq
= fpsrcop
/ fptemp
;
3201 /* round dblq towards nearest integer */
3203 ST0
= fpsrcop
- fptemp
* dblq
;
3205 /* convert dblq to q by truncating towards zero */
3207 q
= (signed long long int)(-dblq
);
3209 q
= (signed long long int)dblq
;
3211 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3212 /* (C0,C3,C1) <-- (q2,q1,q0) */
3213 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3214 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3215 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3217 env
->fpus
|= 0x400; /* C2 <-- 1 */
3218 fptemp
= pow(2.0, expdif
- 50);
3219 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3220 /* fpsrcop = integer obtained by chopping */
3221 fpsrcop
= (fpsrcop
< 0.0) ?
3222 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3223 ST0
-= (ST1
* fpsrcop
* fptemp
);
3227 void helper_fprem(void)
3229 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3230 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3232 signed long long int q
;
3234 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3235 ST0
= 0.0 / 0.0; /* NaN */
3236 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3240 fpsrcop
= (CPU86_LDouble
)ST0
;
3241 fptemp
= (CPU86_LDouble
)ST1
;
3242 fpsrcop1
.d
= fpsrcop
;
3244 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3247 /* optimisation? taken from the AMD docs */
3248 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3249 /* ST0 is unchanged */
3253 if ( expdif
< 53 ) {
3254 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
3255 /* round dblq towards zero */
3256 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
3257 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
3259 /* convert dblq to q by truncating towards zero */
3261 q
= (signed long long int)(-dblq
);
3263 q
= (signed long long int)dblq
;
3265 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3266 /* (C0,C3,C1) <-- (q2,q1,q0) */
3267 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3268 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3269 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3271 int N
= 32 + (expdif
% 32); /* as per AMD docs */
3272 env
->fpus
|= 0x400; /* C2 <-- 1 */
3273 fptemp
= pow(2.0, (double)(expdif
- N
));
3274 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3275 /* fpsrcop = integer obtained by chopping */
3276 fpsrcop
= (fpsrcop
< 0.0) ?
3277 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3278 ST0
-= (ST1
* fpsrcop
* fptemp
);
3282 void helper_fyl2xp1(void)
3284 CPU86_LDouble fptemp
;
3287 if ((fptemp
+1.0)>0.0) {
3288 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
3292 env
->fpus
&= (~0x4700);
3297 void helper_fsqrt(void)
3299 CPU86_LDouble fptemp
;
3303 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3309 void helper_fsincos(void)
3311 CPU86_LDouble fptemp
;
3314 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3320 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3321 /* the above code is for |arg| < 2**63 only */
3325 void helper_frndint(void)
3327 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
3330 void helper_fscale(void)
3332 ST0
= ldexp (ST0
, (int)(ST1
));
3335 void helper_fsin(void)
3337 CPU86_LDouble fptemp
;
3340 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3344 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3345 /* the above code is for |arg| < 2**53 only */
3349 void helper_fcos(void)
3351 CPU86_LDouble fptemp
;
3354 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3358 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3359 /* the above code is for |arg5 < 2**63 only */
3363 void helper_fxam_ST0(void)
3365 CPU86_LDoubleU temp
;
3370 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3372 env
->fpus
|= 0x200; /* C1 <-- 1 */
3374 /* XXX: test fptags too */
3375 expdif
= EXPD(temp
);
3376 if (expdif
== MAXEXPD
) {
3377 #ifdef USE_X86LDOUBLE
3378 if (MANTD(temp
) == 0x8000000000000000ULL
)
3380 if (MANTD(temp
) == 0)
3382 env
->fpus
|= 0x500 /*Infinity*/;
3384 env
->fpus
|= 0x100 /*NaN*/;
3385 } else if (expdif
== 0) {
3386 if (MANTD(temp
) == 0)
3387 env
->fpus
|= 0x4000 /*Zero*/;
3389 env
->fpus
|= 0x4400 /*Denormal*/;
3395 void helper_fstenv(target_ulong ptr
, int data32
)
3397 int fpus
, fptag
, exp
, i
;
3401 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3403 for (i
=7; i
>=0; i
--) {
3405 if (env
->fptags
[i
]) {
3408 tmp
.d
= env
->fpregs
[i
].d
;
3411 if (exp
== 0 && mant
== 0) {
3414 } else if (exp
== 0 || exp
== MAXEXPD
3415 #ifdef USE_X86LDOUBLE
3416 || (mant
& (1LL << 63)) == 0
3419 /* NaNs, infinity, denormal */
3426 stl(ptr
, env
->fpuc
);
3428 stl(ptr
+ 8, fptag
);
3429 stl(ptr
+ 12, 0); /* fpip */
3430 stl(ptr
+ 16, 0); /* fpcs */
3431 stl(ptr
+ 20, 0); /* fpoo */
3432 stl(ptr
+ 24, 0); /* fpos */
3435 stw(ptr
, env
->fpuc
);
3437 stw(ptr
+ 4, fptag
);
3445 void helper_fldenv(target_ulong ptr
, int data32
)
3450 env
->fpuc
= lduw(ptr
);
3451 fpus
= lduw(ptr
+ 4);
3452 fptag
= lduw(ptr
+ 8);
3455 env
->fpuc
= lduw(ptr
);
3456 fpus
= lduw(ptr
+ 2);
3457 fptag
= lduw(ptr
+ 4);
3459 env
->fpstt
= (fpus
>> 11) & 7;
3460 env
->fpus
= fpus
& ~0x3800;
3461 for(i
= 0;i
< 8; i
++) {
3462 env
->fptags
[i
] = ((fptag
& 3) == 3);
3467 void helper_fsave(target_ulong ptr
, int data32
)
3472 helper_fstenv(ptr
, data32
);
3474 ptr
+= (14 << data32
);
3475 for(i
= 0;i
< 8; i
++) {
3477 helper_fstt(tmp
, ptr
);
3495 void helper_frstor(target_ulong ptr
, int data32
)
3500 helper_fldenv(ptr
, data32
);
3501 ptr
+= (14 << data32
);
3503 for(i
= 0;i
< 8; i
++) {
3504 tmp
= helper_fldt(ptr
);
3510 void helper_fxsave(target_ulong ptr
, int data64
)
3512 int fpus
, fptag
, i
, nb_xmm_regs
;
3516 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3518 for(i
= 0; i
< 8; i
++) {
3519 fptag
|= (env
->fptags
[i
] << i
);
3521 stw(ptr
, env
->fpuc
);
3523 stw(ptr
+ 4, fptag
^ 0xff);
3526 for(i
= 0;i
< 8; i
++) {
3528 helper_fstt(tmp
, addr
);
3532 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
3533 /* XXX: finish it */
3534 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
3535 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
3536 nb_xmm_regs
= 8 << data64
;
3538 for(i
= 0; i
< nb_xmm_regs
; i
++) {
3539 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
3540 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
3546 void helper_fxrstor(target_ulong ptr
, int data64
)
3548 int i
, fpus
, fptag
, nb_xmm_regs
;
3552 env
->fpuc
= lduw(ptr
);
3553 fpus
= lduw(ptr
+ 2);
3554 fptag
= lduw(ptr
+ 4);
3555 env
->fpstt
= (fpus
>> 11) & 7;
3556 env
->fpus
= fpus
& ~0x3800;
3558 for(i
= 0;i
< 8; i
++) {
3559 env
->fptags
[i
] = ((fptag
>> i
) & 1);
3563 for(i
= 0;i
< 8; i
++) {
3564 tmp
= helper_fldt(addr
);
3569 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
3570 /* XXX: finish it */
3571 env
->mxcsr
= ldl(ptr
+ 0x18);
3573 nb_xmm_regs
= 8 << data64
;
3575 for(i
= 0; i
< nb_xmm_regs
; i
++) {
3576 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
3577 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
3583 #ifndef USE_X86LDOUBLE
3585 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
3587 CPU86_LDoubleU temp
;
3592 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
3593 /* exponent + sign */
3594 e
= EXPD(temp
) - EXPBIAS
+ 16383;
3595 e
|= SIGND(temp
) >> 16;
3599 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
3601 CPU86_LDoubleU temp
;
3605 /* XXX: handle overflow ? */
3606 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
3607 e
|= (upper
>> 4) & 0x800; /* sign */
3608 ll
= (mant
>> 11) & ((1LL << 52) - 1);
3610 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
3613 temp
.ll
= ll
| ((uint64_t)e
<< 52);
3620 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
3622 CPU86_LDoubleU temp
;
3625 *pmant
= temp
.l
.lower
;
3626 *pexp
= temp
.l
.upper
;
3629 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
3631 CPU86_LDoubleU temp
;
3633 temp
.l
.upper
= upper
;
3634 temp
.l
.lower
= mant
;
3639 #ifdef TARGET_X86_64
3641 //#define DEBUG_MULDIV
3643 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
3652 static void neg128(uint64_t *plow
, uint64_t *phigh
)
3656 add128(plow
, phigh
, 1, 0);
3659 /* return TRUE if overflow */
3660 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
3662 uint64_t q
, r
, a1
, a0
;
3675 /* XXX: use a better algorithm */
3676 for(i
= 0; i
< 64; i
++) {
3678 a1
= (a1
<< 1) | (a0
>> 63);
3679 if (ab
|| a1
>= b
) {
3685 a0
= (a0
<< 1) | qb
;
3687 #if defined(DEBUG_MULDIV)
3688 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
3689 *phigh
, *plow
, b
, a0
, a1
);
3697 /* return TRUE if overflow */
3698 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
3701 sa
= ((int64_t)*phigh
< 0);
3703 neg128(plow
, phigh
);
3707 if (div64(plow
, phigh
, b
) != 0)
3710 if (*plow
> (1ULL << 63))
3714 if (*plow
>= (1ULL << 63))
3722 void helper_mulq_EAX_T0(void)
3726 mulu64(&r1
, &r0
, EAX
, T0
);
3733 void helper_imulq_EAX_T0(void)
3737 muls64(&r1
, &r0
, EAX
, T0
);
3741 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
3744 void helper_imulq_T0_T1(void)
3748 muls64(&r1
, &r0
, T0
, T1
);
3751 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
3754 void helper_divq_EAX_T0(void)
3758 raise_exception(EXCP00_DIVZ
);
3762 if (div64(&r0
, &r1
, T0
))
3763 raise_exception(EXCP00_DIVZ
);
3768 void helper_idivq_EAX_T0(void)
3772 raise_exception(EXCP00_DIVZ
);
3776 if (idiv64(&r0
, &r1
, T0
))
3777 raise_exception(EXCP00_DIVZ
);
3782 void helper_bswapq_T0(void)
3788 void helper_hlt(void)
3790 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
3791 env
->hflags
|= HF_HALTED_MASK
;
3792 env
->exception_index
= EXCP_HLT
;
3796 void helper_monitor(void)
3798 if ((uint32_t)ECX
!= 0)
3799 raise_exception(EXCP0D_GPF
);
3800 /* XXX: store address ? */
3803 void helper_mwait(void)
3805 if ((uint32_t)ECX
!= 0)
3806 raise_exception(EXCP0D_GPF
);
3807 /* XXX: not complete but not completely erroneous */
3808 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
3809 /* more than one CPU: do not sleep because another CPU may
3816 float approx_rsqrt(float a
)
3818 return 1.0 / sqrt(a
);
3821 float approx_rcp(float a
)
3826 void update_fp_status(void)
3830 /* set rounding mode */
3831 switch(env
->fpuc
& RC_MASK
) {
3834 rnd_type
= float_round_nearest_even
;
3837 rnd_type
= float_round_down
;
3840 rnd_type
= float_round_up
;
3843 rnd_type
= float_round_to_zero
;
3846 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3848 switch((env
->fpuc
>> 8) & 3) {
3860 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3864 #if !defined(CONFIG_USER_ONLY)
3866 #define MMUSUFFIX _mmu
3867 #define GETPC() (__builtin_return_address(0))
3870 #include "softmmu_template.h"
3873 #include "softmmu_template.h"
3876 #include "softmmu_template.h"
3879 #include "softmmu_template.h"
3883 /* try to fill the TLB and return an exception if error. If retaddr is
3884 NULL, it means that the function was called in C code (i.e. not
3885 from generated code or from helper.c) */
3886 /* XXX: fix it to restore all registers */
3887 void tlb_fill(target_ulong addr
, int is_write
, int is_user
, void *retaddr
)
3889 TranslationBlock
*tb
;
3892 CPUX86State
*saved_env
;
3894 /* XXX: hack to restore env in all cases, even if not called from
3897 env
= cpu_single_env
;
3899 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, is_user
, 1);
3902 /* now we have a real cpu fault */
3903 pc
= (unsigned long)retaddr
;
3904 tb
= tb_find_pc(pc
);
3906 /* the PC is inside the translated code. It means that we have
3907 a virtual CPU fault */
3908 cpu_restore_state(tb
, env
, pc
, NULL
);
3912 raise_exception_err(env
->exception_index
, env
->error_code
);
3914 raise_exception_err_norestore(env
->exception_index
, env
->error_code
);
3920 /* Secure Virtual Machine helpers */
3922 void helper_stgi(void)
3924 env
->hflags
|= HF_GIF_MASK
;
3927 void helper_clgi(void)
3929 env
->hflags
&= ~HF_GIF_MASK
;
3932 #if defined(CONFIG_USER_ONLY)
3934 void helper_vmrun(target_ulong addr
) { }
3935 void helper_vmmcall(void) { }
3936 void helper_vmload(target_ulong addr
) { }
3937 void helper_vmsave(target_ulong addr
) { }
3938 void helper_skinit(void) { }
3939 void helper_invlpga(void) { }
3940 void vmexit(uint64_t exit_code
, uint64_t exit_info_1
) { }
3941 int svm_check_intercept_param(uint32_t type
, uint64_t param
)
3948 static inline uint32_t
3949 vmcb2cpu_attrib(uint16_t vmcb_attrib
, uint32_t vmcb_base
, uint32_t vmcb_limit
)
3951 return ((vmcb_attrib
& 0x00ff) << 8) /* Type, S, DPL, P */
3952 | ((vmcb_attrib
& 0x0f00) << 12) /* AVL, L, DB, G */
3953 | ((vmcb_base
>> 16) & 0xff) /* Base 23-16 */
3954 | (vmcb_base
& 0xff000000) /* Base 31-24 */
3955 | (vmcb_limit
& 0xf0000); /* Limit 19-16 */
3958 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib
)
3960 return ((cpu_attrib
>> 8) & 0xff) /* Type, S, DPL, P */
3961 | ((cpu_attrib
& 0xf00000) >> 12); /* AVL, L, DB, G */
3964 extern uint8_t *phys_ram_base
;
3965 void helper_vmrun(target_ulong addr
)
3970 if (loglevel
& CPU_LOG_TB_IN_ASM
)
3971 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
3973 env
->vm_vmcb
= addr
;
3976 /* save the current CPU state in the hsave page */
3977 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
3978 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
3980 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
3981 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
3983 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
3984 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
3985 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
3986 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
3987 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
), env
->cr
[8]);
3988 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
3989 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
3991 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
3992 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
3994 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_ES
], es
);
3995 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_CS
], cs
);
3996 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_SS
], ss
);
3997 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_DS
], ds
);
3999 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
), EIP
);
4000 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4001 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4003 /* load the interception bitmaps so we do not need to access the
4005 /* We shift all the intercept bits so we can OR them with the TB
4007 env
->intercept
= (ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
)) << INTERCEPT_INTR
) | INTERCEPT_SVM_MASK
;
4008 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4009 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4010 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4011 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4012 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4014 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4015 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4017 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4018 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4020 /* clear exit_info_2 so we behave like the real hardware */
4021 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4023 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4024 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4025 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4026 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4027 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4028 if (int_ctl
& V_INTR_MASKING_MASK
) {
4029 env
->cr
[8] = int_ctl
& V_TPR_MASK
;
4030 if (env
->eflags
& IF_MASK
)
4031 env
->hflags
|= HF_HIF_MASK
;
4034 #ifdef TARGET_X86_64
4035 env
->efer
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
));
4036 env
->hflags
&= ~HF_LMA_MASK
;
4037 if (env
->efer
& MSR_EFER_LMA
)
4038 env
->hflags
|= HF_LMA_MASK
;
4041 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4042 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4043 CC_OP
= CC_OP_EFLAGS
;
4044 CC_DST
= 0xffffffff;
4046 SVM_LOAD_SEG(env
->vm_vmcb
, ES
, es
);
4047 SVM_LOAD_SEG(env
->vm_vmcb
, CS
, cs
);
4048 SVM_LOAD_SEG(env
->vm_vmcb
, SS
, ss
);
4049 SVM_LOAD_SEG(env
->vm_vmcb
, DS
, ds
);
4051 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4053 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4054 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4055 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4056 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4057 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4059 /* FIXME: guest state consistency checks */
4061 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4062 case TLB_CONTROL_DO_NOTHING
:
4064 case TLB_CONTROL_FLUSH_ALL_ASID
:
4065 /* FIXME: this is not 100% correct but should work for now */
4074 /* maybe we need to inject an event */
4075 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4076 if (event_inj
& SVM_EVTINJ_VALID
) {
4077 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4078 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4079 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4080 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4082 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4083 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4084 /* FIXME: need to implement valid_err */
4085 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4086 case SVM_EVTINJ_TYPE_INTR
:
4087 env
->exception_index
= vector
;
4088 env
->error_code
= event_inj_err
;
4089 env
->exception_is_int
= 1;
4090 env
->exception_next_eip
= -1;
4091 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4092 fprintf(logfile
, "INTR");
4094 case SVM_EVTINJ_TYPE_NMI
:
4095 env
->exception_index
= vector
;
4096 env
->error_code
= event_inj_err
;
4097 env
->exception_is_int
= 1;
4098 env
->exception_next_eip
= EIP
;
4099 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4100 fprintf(logfile
, "NMI");
4102 case SVM_EVTINJ_TYPE_EXEPT
:
4103 env
->exception_index
= vector
;
4104 env
->error_code
= event_inj_err
;
4105 env
->exception_is_int
= 0;
4106 env
->exception_next_eip
= -1;
4107 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4108 fprintf(logfile
, "EXEPT");
4110 case SVM_EVTINJ_TYPE_SOFT
:
4111 env
->exception_index
= vector
;
4112 env
->error_code
= event_inj_err
;
4113 env
->exception_is_int
= 1;
4114 env
->exception_next_eip
= EIP
;
4115 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4116 fprintf(logfile
, "SOFT");
4119 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4120 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4122 if ((int_ctl
& V_IRQ_MASK
) || (env
->intercept
& INTERCEPT_VINTR
)) {
4123 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4129 void helper_vmmcall(void)
4131 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4132 fprintf(logfile
,"vmmcall!\n");
4135 void helper_vmload(target_ulong addr
)
4137 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4138 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4139 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4140 env
->segs
[R_FS
].base
);
4142 SVM_LOAD_SEG2(addr
, segs
[R_FS
], fs
);
4143 SVM_LOAD_SEG2(addr
, segs
[R_GS
], gs
);
4144 SVM_LOAD_SEG2(addr
, tr
, tr
);
4145 SVM_LOAD_SEG2(addr
, ldt
, ldtr
);
4147 #ifdef TARGET_X86_64
4148 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
4149 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
4150 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
4151 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
4153 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
4154 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
4155 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
4156 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
4159 void helper_vmsave(target_ulong addr
)
4161 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4162 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4163 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4164 env
->segs
[R_FS
].base
);
4166 SVM_SAVE_SEG(addr
, segs
[R_FS
], fs
);
4167 SVM_SAVE_SEG(addr
, segs
[R_GS
], gs
);
4168 SVM_SAVE_SEG(addr
, tr
, tr
);
4169 SVM_SAVE_SEG(addr
, ldt
, ldtr
);
4171 #ifdef TARGET_X86_64
4172 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
4173 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
4174 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
4175 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
4177 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
4178 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
4179 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
4180 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
4183 void helper_skinit(void)
4185 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4186 fprintf(logfile
,"skinit!\n");
/* INVLPGA helper.  NOTE(review): only the signature survived this
   extraction — the body (presumably a TLB flush/invalidation) is not
   visible here; confirm against the pristine source. */
4189 void helper_invlpga(void)
4194 int svm_check_intercept_param(uint32_t type
, uint64_t param
)
4197 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
4198 if (INTERCEPTEDw(_cr_read
, (1 << (type
- SVM_EXIT_READ_CR0
)))) {
4199 vmexit(type
, param
);
4203 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 8:
4204 if (INTERCEPTEDw(_dr_read
, (1 << (type
- SVM_EXIT_READ_DR0
)))) {
4205 vmexit(type
, param
);
4209 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
4210 if (INTERCEPTEDw(_cr_write
, (1 << (type
- SVM_EXIT_WRITE_CR0
)))) {
4211 vmexit(type
, param
);
4215 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 8:
4216 if (INTERCEPTEDw(_dr_write
, (1 << (type
- SVM_EXIT_WRITE_DR0
)))) {
4217 vmexit(type
, param
);
4221 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 16:
4222 if (INTERCEPTEDl(_exceptions
, (1 << (type
- SVM_EXIT_EXCP_BASE
)))) {
4223 vmexit(type
, param
);
4228 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT
)) {
4229 /* FIXME: this should be read in at vmrun (faster this way?) */
4230 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
4231 uint16_t port
= (uint16_t) (param
>> 16);
4233 if(ldub_phys(addr
+ port
/ 8) & (1 << (port
% 8)))
4234 vmexit(type
, param
);
4239 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT
)) {
4240 /* FIXME: this should be read in at vmrun (faster this way?) */
4241 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
4242 switch((uint32_t)ECX
) {
4247 case 0xc0000000 ... 0xc0001fff:
4248 T0
= (8192 + ECX
- 0xc0000000) * 2;
4252 case 0xc0010000 ... 0xc0011fff:
4253 T0
= (16384 + ECX
- 0xc0010000) * 2;
4258 vmexit(type
, param
);
4261 if (ldub_phys(addr
+ T1
) & ((1 << param
) << T0
))
4262 vmexit(type
, param
);
4267 if (INTERCEPTED((1ULL << ((type
- SVM_EXIT_INTR
) + INTERCEPT_INTR
)))) {
4268 vmexit(type
, param
);
4276 void vmexit(uint64_t exit_code
, uint64_t exit_info_1
)
4280 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4281 fprintf(logfile
,"vmexit(%016" PRIx64
", %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
4282 exit_code
, exit_info_1
,
4283 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
4286 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
4287 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
4288 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4290 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
4293 /* Save the VM state in the vmcb */
4294 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_ES
], es
);
4295 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_CS
], cs
);
4296 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_SS
], ss
);
4297 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_DS
], ds
);
4299 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4300 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4302 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4303 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4305 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4306 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4307 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4308 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4309 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4311 if ((int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
))) & V_INTR_MASKING_MASK
) {
4312 int_ctl
&= ~V_TPR_MASK
;
4313 int_ctl
|= env
->cr
[8] & V_TPR_MASK
;
4314 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
4317 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4318 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
4319 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4320 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4321 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4322 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4323 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
4325 /* Reload the host state from vm_hsave */
4326 env
->hflags
&= ~HF_HIF_MASK
;
4328 env
->intercept_exceptions
= 0;
4329 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
4331 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4332 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4334 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
4335 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4337 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
4338 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
4339 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
4340 if (int_ctl
& V_INTR_MASKING_MASK
)
4341 env
->cr
[8] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
));
4342 /* we need to set the efer after the crs so the hidden flags get set properly */
4343 #ifdef TARGET_X86_64
4344 env
->efer
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
));
4345 env
->hflags
&= ~HF_LMA_MASK
;
4346 if (env
->efer
& MSR_EFER_LMA
)
4347 env
->hflags
|= HF_LMA_MASK
;
4351 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
4352 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4353 CC_OP
= CC_OP_EFLAGS
;
4355 SVM_LOAD_SEG(env
->vm_hsave
, ES
, es
);
4356 SVM_LOAD_SEG(env
->vm_hsave
, CS
, cs
);
4357 SVM_LOAD_SEG(env
->vm_hsave
, SS
, ss
);
4358 SVM_LOAD_SEG(env
->vm_hsave
, DS
, ds
);
4360 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
4361 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
4362 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
4364 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
4365 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
4368 cpu_x86_set_cpl(env
, 0);
4369 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code_hi
), (uint32_t)(exit_code
>> 32));
4370 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
4371 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
4374 /* FIXME: Resets the current ASID register to zero (host ASID). */
4376 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4378 /* Clears the TSC_OFFSET inside the processor. */
4380 /* If the host is in PAE mode, the processor reloads the host's PDPEs
4381 from the page table indicated the host's CR3. If the PDPEs contain
4382 illegal state, the processor causes a shutdown. */
4384 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4385 env
->cr
[0] |= CR0_PE_MASK
;
4386 env
->eflags
&= ~VM_MASK
;
4388 /* Disables all breakpoints in the host DR7 register. */
4390 /* Checks the reloaded host state for consistency. */
4392 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4393 host's code segment or non-canonical (in the case of long mode), a
4394 #GP fault is delivered inside the host.) */
4396 /* remove any pending exception */
4397 env
->exception_index
= -1;
4398 env
->error_code
= 0;
4399 env
->old_exception
= -1;