/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
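
/*
 * Worked example: a flat 4 GiB code segment is encoded as
 * e1 = 0x0000ffff, e2 = 0x00cf9a00.  get_seg_base() assembles
 * base[15:0] from e1[31:16], base[23:16] from e2[7:0] and base[31:24]
 * from e2[31:24], giving 0; get_seg_limit() yields the 20-bit limit
 * 0xfffff, which DESC_G_MASK scales by 4 KiB to 0xffffffff.
 */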
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
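
/*
 * In a 32-bit TSS the ring-n stack pointer pair lives at offset
 * 4 + 8 * n (ESPn first, then SSn); in a 16-bit TSS it is at
 * 2 + 4 * n.  That is exactly what (dpl * 4 + 2) << shift computes
 * above for shift == 1 and shift == 0 respectively.
 */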
static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
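
/*
 * The three sources differ in busy-bit and back-link handling below:
 * JMP and IRET clear the busy bit of the outgoing TSS, JMP and CALL
 * set it on the incoming one, and only CALL stores the old TR
 * selector in the new TSS and sets EFLAGS.NT.
 */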
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }
    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }
    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }
    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
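
/*
 * 32-bit TSS offsets used above, for reference: +0x1c CR3, +0x20 EIP,
 * +0x24 EFLAGS, +0x28 EAX..EDI, +0x48 ES..GS selectors, +0x60 LDT
 * selector, +0x64 T bit/IO map base.  The 16-bit TSS packs the same
 * state into 44 bytes, hence the 103/43 limit checks.
 */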
static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:  /* #DF */
    case 10: /* #TS */
    case 11: /* #NP */
    case 12: /* #SS */
    case 13: /* #GP */
    case 14: /* #PF */
    case 17: /* #AC */
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
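
/*
 * Example: with sp_mask == 0xffff, PUSHW(ssp, sp, sp_mask, v) first
 * decrements the local copy of sp and then stores v at
 * ssp + (sp & 0xffff), so only the low 16 bits of the stack pointer
 * take part in the addressing; callers commit sp with SET_ESP.
 */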
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }
    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }
    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
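
/*
 * The resulting stack frame for an inner-privilege-level 386 gate is,
 * from higher to lower addresses: [old SS, old ESP,] EFLAGS, CS, EIP
 * and, for some exceptions, the error code; vm86 entries additionally
 * save GS/FS/DS/ES on top.
 */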
#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
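
/*
 * The "ist != 0 ? ist + 3 : dpl" mapping above matches the 64-bit TSS
 * layout: get_rsp_from_tss() reads offset 8 * level + 4, which
 * addresses RSP0-RSP2 for levels 0-2 and IST1-IST7 for levels 4-10.
 */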
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif /* TARGET_X86_64 */
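
/*
 * SYSRET derives its selectors from MSR_STAR[63:48]: 64-bit mode
 * returns to CS = selector + 16 and 32-bit compatibility mode to
 * CS = selector, with SS = selector + 8 in both cases, all with the
 * RPL forced to 3.
 */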
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
#endif
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_USER_ONLY
        cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
#else
        do_smm_enter(cpu);
#endif /* CONFIG_USER_ONLY */
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_VIRQ:
        /* FIXME: this should respect TPR */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        break;
#endif
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
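
/*
 * In long mode, LDT and TSS descriptors are 16 bytes: the second
 * 8-byte half supplies base[63:32], which is why entry_limit is 15
 * there and an extra cpu_ldl_kernel_ra() fetches the high word.
 */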
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* in long mode, only 64-bit call gates are valid */
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
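
/*
 * Gate descriptor layout used by the call/jmp helpers: the target
 * selector sits in e1[31:16], the entry offset is split between
 * e1[15:0] and, for 386 gates, e2[31:16]; 64-bit call gates carry
 * offset[63:32] in the following 8-byte descriptor slot.
 */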
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* in long mode, only 64-bit call gates are valid */
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;

                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }
            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
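
/*
 * For an inner-privilege call gate, param_count (the low 5 bits of
 * e2) stack words are copied from the outgoing stack to the incoming
 * one, after the old SS:ESP pair has been pushed and before CS and
 * the return EIP follow.
 */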
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non-conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }
2079 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2080 get_seg_base(e1
, e2
),
2081 get_seg_limit(e1
, e2
),
2084 #ifdef TARGET_X86_64
2085 if (env
->hflags
& HF_CS64_MASK
) {
2090 sp_mask
= get_sp_mask(ss_e2
);
2093 /* validate data segments */
2094 validate_seg(env
, R_ES
, rpl
);
2095 validate_seg(env
, R_DS
, rpl
);
2096 validate_seg(env
, R_FS
, rpl
);
2097 validate_seg(env
, R_GS
, rpl
);
2101 SET_ESP(sp
, sp_mask
);
2104 /* NOTE: 'cpl' is the _old_ CPL */
2105 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2107 eflags_mask
|= IOPL_MASK
;
2109 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2111 eflags_mask
|= IF_MASK
;
2114 eflags_mask
&= 0xffff;
2116 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2121 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2122 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2123 POPL_RA(ssp
, sp
, sp_mask
, new_es
, retaddr
);
2124 POPL_RA(ssp
, sp
, sp_mask
, new_ds
, retaddr
);
2125 POPL_RA(ssp
, sp
, sp_mask
, new_fs
, retaddr
);
2126 POPL_RA(ssp
, sp
, sp_mask
, new_gs
, retaddr
);
2128 /* modify processor state */
2129 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2130 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2132 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2133 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2134 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2135 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2136 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2137 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2139 env
->eip
= new_eip
& 0xffff;
2140 env
->regs
[R_ESP
] = new_esp
;
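
/*
 * Illustration (a sketch, derived from the POP sequences above; slot width
 * is 2/4/8 bytes for shift = 0/1/2): a same-privilege 32-bit "lret $imm16"
 * consumes
 *
 *     [ESP]      new EIP
 *     [ESP+4]    new CS        (only the low 16 bits are used)
 *
 * with addend = imm16 added afterwards to drop the callee's stack
 * arguments. An inter-privilege return pops two further slots, new ESP and
 * new SS, and a 32-bit "iret" also pops EFLAGS between CS and ESP; if the
 * popped EFLAGS has VM set, the six-register vm86 frame is consumed at
 * return_to_vm86 instead.
 */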
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
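
/*
 * Example of the NT_MASK path above (sketch): a handler dispatched through
 * a task gate runs with EFLAGS.NT set and the interrupted task's TSS
 * selector in the back link at offset 0 of the current TSS. Its "iret"
 * pops no stack frame; it resumes the previous task through
 * switch_tss_ra(..., SWITCH_TSS_IRET, ...). Long mode has no hardware
 * task switching, hence the #GP(0) raised there.
 */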
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
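
/*
 * Worked example with a hypothetical IA32_SYSENTER_CS of 0x08: the code
 * above enters the kernel with CS = 0x08 and SS = 0x10 (sysenter_cs + 8),
 * both synthesized flat (base 0, limit 0xffffffff) without any descriptor
 * table lookup, plus ESP/EIP taken from the SYSENTER_ESP/SYSENTER_EIP
 * MSRs. This mirrors the architectural requirement that the kernel data
 * descriptor directly follow the kernel code descriptor in the GDT.
 */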
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
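
/*
 * Continuing the hypothetical IA32_SYSENTER_CS = 0x08 example on the
 * return path: the 32-bit form (dflag != 2) resumes user mode with
 *
 *     CS = ((0x08 + 16) & 0xfffc) | 3 = 0x1b
 *     SS = ((0x08 + 24) & 0xfffc) | 3 = 0x23
 *
 * while the 64-bit form uses +32/+40. ESP and EIP are taken from ECX and
 * EDX, so the kernel must set those registers up before "sysexit".
 */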
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
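
/*
 * Guest-level usage sketch (illustrative only):
 *
 *     lsl  eax, bx        ; bx holds the selector under test
 *     jnz  1f             ; ZF clear -> not accessible, eax unchanged
 *     ...                 ; eax = byte-granular limit computed above
 *
 * The helper reports ZF through CC_SRC; the translator treats the stored
 * value as live EFLAGS afterwards, so setting or clearing CC_Z here is
 * how LSL's documented ZF behaviour is produced.
 */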
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
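
/*
 * Note on the 0x00f0ff00 mask just above: LAR returns the access-rights
 * bits of the descriptor's second word and hides the limit field. For a
 * typical flat code descriptor, e2 = 0x00cf9b00:
 *
 *     0x00cf9b00 & 0x00f0ff00 = 0x00c09b00
 *
 * keeping type/S/DPL/P (bits 8-15) and AVL/L/D-B/G (bits 20-23) while the
 * limit bits 16-19 read back as zero.
 */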
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
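
/*
 * Usage sketch for the two helpers above: VERR/VERW let unprivileged code
 * probe a selector without risking a fault, e.g.
 *
 *     verw ax             ; ZF=1 iff writable at the current CPL/RPL
 *     jz   can_write
 *
 * Both reduce to the DPL >= max(CPL, RPL) check that a real data-segment
 * load would perform, with conforming code segments exempted on the read
 * (VERR) side.
 */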
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
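
/*
 * Worked example (illustrative values): assume the bitmap base read from
 * tr.base + 0x66 is 0x88 and the guest performs a 2-byte access to port
 * 0x3f8. Then
 *
 *     io_offset = 0x88 + (0x3f8 >> 3) = 0x107
 *     val       = lduw(tr.base + 0x107) >> (0x3f8 & 7)
 *     mask      = (1 << 2) - 1 = 3
 *
 * and the access is allowed only if the two low bits (ports 0x3f8/0x3f9)
 * are both clear. The two-byte load matters when the range straddles a
 * byte boundary, e.g. a word access to port 0x3ff tests bits 7 and 8.
 */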
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}
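
/*
 * These wrappers give the translator one entry point per access size and,
 * being the functions called directly from generated code, capture the
 * correct GETPC() for exception unwinding. They are only emitted when the
 * TSS check cannot be skipped, i.e. when CPL > IOPL in protected mode or
 * when running in vm86 mode.
 */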