[qemu/ar7.git] target/i386/seg_helper.c (blob 0374031ea24fb623239dd71d3a7a020a7cec0aa8)
1 /*
2  * x86 segmentation related helpers:
3  * TSS, interrupts, system calls, jumps and call/task gates, descriptors
4  *
5  * Copyright (c) 2003 Fabrice Bellard
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
29 //#define DEBUG_PCALL
31 #ifdef DEBUG_PCALL
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu) \
34 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 #else
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
38 #endif
40 #ifdef CONFIG_USER_ONLY
41 #define MEMSUFFIX _kernel
42 #define DATA_SIZE 1
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 2
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 4
49 #include "exec/cpu_ldst_useronly_template.h"
51 #define DATA_SIZE 8
52 #include "exec/cpu_ldst_useronly_template.h"
53 #undef MEMSUFFIX
54 #else
55 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
56 #define MEMSUFFIX _kernel
57 #define DATA_SIZE 1
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 2
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 4
64 #include "exec/cpu_ldst_template.h"
66 #define DATA_SIZE 8
67 #include "exec/cpu_ldst_template.h"
68 #undef CPU_MMU_INDEX
69 #undef MEMSUFFIX
70 #endif
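/*
 * Roughly speaking, the template includes above expand into the
 * cpu_ld{ub,uw,l,q}_kernel[_ra]() and cpu_st{b,w,l,q}_kernel[_ra]()
 * accessors used throughout this file: fixed-size loads and stores
 * that go through the kernel MMU index (or plain user-mode accesses
 * under CONFIG_USER_ONLY), with an optional host return address so a
 * faulting access can be unwound to the guest instruction.
 */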
72 /* return non zero if error */
73 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
74 uint32_t *e2_ptr, int selector,
75 uintptr_t retaddr)
77 SegmentCache *dt;
78 int index;
79 target_ulong ptr;
81 if (selector & 0x4) {
82 dt = &env->ldt;
83 } else {
84 dt = &env->gdt;
86 index = selector & ~7;
87 if ((index + 7) > dt->limit) {
88 return -1;
90 ptr = dt->base + index;
91 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
92 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
93 return 0;
96 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
97 uint32_t *e2_ptr, int selector)
99 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
102 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
104 unsigned int limit;
106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
107 if (e2 & DESC_G_MASK) {
108 limit = (limit << 12) | 0xfff;
110 return limit;
113 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
115 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
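/*
 * In the two helpers above, e1/e2 are the low/high 32-bit words of a
 * segment descriptor: e1 holds limit[15:0] and base[15:0], e2 holds
 * base[23:16], the access byte (type/S/DPL/P), limit[19:16], the
 * AVL/L/D-B/G flags and base[31:24].  For example, the usual flat
 * 4 GiB descriptor 0x00cf9a000000ffff gives e1 = 0x0000ffff and
 * e2 = 0x00cf9a00, which decodes to base 0 and, since DESC_G_MASK is
 * set, limit 0xffffffff.
 */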
118 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
119 uint32_t e2)
121 sc->base = get_seg_base(e1, e2);
122 sc->limit = get_seg_limit(e1, e2);
123 sc->flags = e2;
126 /* init the segment cache in vm86 mode. */
127 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
129 selector &= 0xffff;
131 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
132 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
133 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
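/*
 * This mirrors real-mode/vm86 segment semantics: base = selector << 4,
 * a 64 KiB limit, and cached flags describing a present, writable data
 * segment at DPL 3.
 */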
136 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
137 uint32_t *esp_ptr, int dpl,
138 uintptr_t retaddr)
140 X86CPU *cpu = x86_env_get_cpu(env);
141 int type, index, shift;
143 #if 0
145 int i;
146 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
147 for (i = 0; i < env->tr.limit; i++) {
148 printf("%02x ", env->tr.base[i]);
149 if ((i & 7) == 7) {
150 printf("\n");
153 printf("\n");
155 #endif
157 if (!(env->tr.flags & DESC_P_MASK)) {
158 cpu_abort(CPU(cpu), "invalid tss");
160 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
161 if ((type & 7) != 1) {
162 cpu_abort(CPU(cpu), "invalid tss type");
164 shift = type >> 3;
165 index = (dpl * 4 + 2) << shift;
166 if (index + (4 << shift) - 1 > env->tr.limit) {
167 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
169 if (shift == 0) {
170 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
172 } else {
173 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
174 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
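/*
 * The TSS stores one SS:(E)SP pair per privilege level near its start.
 * In a 32-bit TSS (shift == 1) the pair for level dpl is at offset
 * 4 + dpl * 8 (ESP0 at 4, SS0 at 8, ...); in a 16-bit TSS (shift == 0)
 * it is at offset 2 + dpl * 4 with 16-bit fields.  The expression
 * (dpl * 4 + 2) << shift above covers both layouts.
 */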
178 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
179 uintptr_t retaddr)
181 uint32_t e1, e2;
182 int rpl, dpl;
184 if ((selector & 0xfffc) != 0) {
185 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 if (!(e2 & DESC_S_MASK)) {
189 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191 rpl = selector & 3;
192 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
193 if (seg_reg == R_CS) {
194 if (!(e2 & DESC_CS_MASK)) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 if (dpl != rpl) {
198 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
200 } else if (seg_reg == R_SS) {
201 /* SS must be writable data */
202 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 if (dpl != cpl || dpl != rpl) {
206 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
208 } else {
209 /* not readable code */
210 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
211 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
213 /* if data or non conforming code, checks the rights */
214 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
215 if (dpl < cpl || dpl < rpl) {
216 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
220 if (!(e2 & DESC_P_MASK)) {
221 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
223 cpu_x86_load_seg_cache(env, seg_reg, selector,
224 get_seg_base(e1, e2),
225 get_seg_limit(e1, e2),
226 e2);
227 } else {
228 if (seg_reg == R_SS || seg_reg == R_CS) {
229 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
234 #define SWITCH_TSS_JMP 0
235 #define SWITCH_TSS_IRET 1
236 #define SWITCH_TSS_CALL 2
238 /* XXX: restore CPU state in registers (PowerPC case) */
239 static void switch_tss_ra(CPUX86State *env, int tss_selector,
240 uint32_t e1, uint32_t e2, int source,
241 uint32_t next_eip, uintptr_t retaddr)
243 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
244 target_ulong tss_base;
245 uint32_t new_regs[8], new_segs[6];
246 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
247 uint32_t old_eflags, eflags_mask;
248 SegmentCache *dt;
249 int index;
250 target_ulong ptr;
252 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
253 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
254 source);
256 /* if task gate, we read the TSS segment and we load it */
257 if (type == 5) {
258 if (!(e2 & DESC_P_MASK)) {
259 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
261 tss_selector = e1 >> 16;
262 if (tss_selector & 4) {
263 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
265 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 if (e2 & DESC_S_MASK) {
269 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
271 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
272 if ((type & 7) != 1) {
273 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
277 if (!(e2 & DESC_P_MASK)) {
278 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
281 if (type & 8) {
282 tss_limit_max = 103;
283 } else {
284 tss_limit_max = 43;
286 tss_limit = get_seg_limit(e1, e2);
287 tss_base = get_seg_base(e1, e2);
288 if ((tss_selector & 4) != 0 ||
289 tss_limit < tss_limit_max) {
290 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
292 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
293 if (old_type & 8) {
294 old_tss_limit_max = 103;
295 } else {
296 old_tss_limit_max = 43;
299 /* read all the registers from the new TSS */
300 if (type & 8) {
301 /* 32 bit */
302 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
303 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
304 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
305 for (i = 0; i < 8; i++) {
306 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
307 retaddr);
309 for (i = 0; i < 6; i++) {
310 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
311 retaddr);
313 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
314 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
315 } else {
316 /* 16 bit */
317 new_cr3 = 0;
318 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
319 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
320 for (i = 0; i < 8; i++) {
321 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
322 retaddr) | 0xffff0000;
324 for (i = 0; i < 4; i++) {
325 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
326 retaddr);
328 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
329 new_segs[R_FS] = 0;
330 new_segs[R_GS] = 0;
331 new_trap = 0;
333 /* XXX: avoid a compiler warning, see
334 http://support.amd.com/us/Processor_TechDocs/24593.pdf
335 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
336 (void)new_trap;
338 /* NOTE: we must avoid memory exceptions during the task switch,
339 so we make dummy accesses before */
340 /* XXX: it can still fail in some cases, so a bigger hack is
341    necessary to validate the TLB after having done the accesses */
343 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
344 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
345 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
346 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
348 /* clear busy bit (it is restartable) */
349 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
350 target_ulong ptr;
351 uint32_t e2;
353 ptr = env->gdt.base + (env->tr.selector & ~7);
354 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
355 e2 &= ~DESC_TSS_BUSY_MASK;
356 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
358 old_eflags = cpu_compute_eflags(env);
359 if (source == SWITCH_TSS_IRET) {
360 old_eflags &= ~NT_MASK;
363 /* save the current state in the old TSS */
364 if (type & 8) {
365 /* 32 bit */
366 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
374 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
375 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
376 for (i = 0; i < 6; i++) {
377 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
378 env->segs[i].selector, retaddr);
380 } else {
381 /* 16 bit */
382 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
390 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
391 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
392 for (i = 0; i < 4; i++) {
393 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
394 env->segs[i].selector, retaddr);
398 /* now if an exception occurs, it will occur in the next task
399    context */
401 if (source == SWITCH_TSS_CALL) {
402 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
403 new_eflags |= NT_MASK;
406 /* set busy bit */
407 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
408 target_ulong ptr;
409 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
413 e2 |= DESC_TSS_BUSY_MASK;
414 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
430 /* load all registers without an exception, then reload them with
431 possible exception */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8)) {
436 eflags_mask &= 0xffff;
438 cpu_load_eflags(env, new_eflags, eflags_mask);
439 /* XXX: what to do in 16 bit case? */
440 env->regs[R_EAX] = new_regs[0];
441 env->regs[R_ECX] = new_regs[1];
442 env->regs[R_EDX] = new_regs[2];
443 env->regs[R_EBX] = new_regs[3];
444 env->regs[R_ESP] = new_regs[4];
445 env->regs[R_EBP] = new_regs[5];
446 env->regs[R_ESI] = new_regs[6];
447 env->regs[R_EDI] = new_regs[7];
448 if (new_eflags & VM_MASK) {
449 for (i = 0; i < 6; i++) {
450 load_seg_vm(env, i, new_segs[i]);
452 } else {
453 /* first just selectors as the rest may trigger exceptions */
454 for (i = 0; i < 6; i++) {
455 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
459 env->ldt.selector = new_ldt & ~4;
460 env->ldt.base = 0;
461 env->ldt.limit = 0;
462 env->ldt.flags = 0;
464 /* load the LDT */
465 if (new_ldt & 4) {
466 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit) {
473 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475 ptr = dt->base + index;
476 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
477 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
478 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 if (!(e2 & DESC_P_MASK)) {
482 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
484 load_seg_cache_raw_dt(&env->ldt, e1, e2);
487 /* load the segments */
488 if (!(new_eflags & VM_MASK)) {
489 int cpl = new_segs[R_CS] & 3;
490 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
491 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
492 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
493 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
494 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
495 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
498 /* check that env->eip is in the CS segment limits */
499 if (new_eip > env->segs[R_CS].limit) {
500 /* XXX: different exception if CALL? */
501 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
504 #ifndef CONFIG_USER_ONLY
505 /* reset local breakpoints */
506 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
507 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
509 #endif
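/*
 * Summary of the task switch implemented above: validate the target
 * TSS (following a task-gate indirection if needed), save the outgoing
 * context into the current TSS, clear the busy bit of the old
 * descriptor on JMP/IRET and set it on the new one for JMP/CALL, set
 * NT and the back link for CALL, then load CR3 (for a 32-bit TSS with
 * paging enabled), EFLAGS, the general registers, LDTR and finally the
 * segment registers from the new TSS, raising #TS/#NP/#GP as the
 * individual checks require.
 */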
512 static void switch_tss(CPUX86State *env, int tss_selector,
513 uint32_t e1, uint32_t e2, int source,
514 uint32_t next_eip)
516 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
519 static inline unsigned int get_sp_mask(unsigned int e2)
521 if (e2 & DESC_B_MASK) {
522 return 0xffffffff;
523 } else {
524 return 0xffff;
528 static int exception_has_error_code(int intno)
530 switch (intno) {
531 case 8:
532 case 10:
533 case 11:
534 case 12:
535 case 13:
536 case 14:
537 case 17:
538 return 1;
540 return 0;
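/*
 * These are the architectural exceptions that push an error code:
 * #DF (8), #TS (10), #NP (11), #SS (12), #GP (13), #PF (14) and
 * #AC (17).
 */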
543 #ifdef TARGET_X86_64
544 #define SET_ESP(val, sp_mask) \
545 do { \
546 if ((sp_mask) == 0xffff) { \
547 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
548 ((val) & 0xffff); \
549 } else if ((sp_mask) == 0xffffffffLL) { \
550 env->regs[R_ESP] = (uint32_t)(val); \
551 } else { \
552 env->regs[R_ESP] = (val); \
554 } while (0)
555 #else
556 #define SET_ESP(val, sp_mask) \
557 do { \
558 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
559 ((val) & (sp_mask)); \
560 } while (0)
561 #endif
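/*
 * SET_ESP only updates the bits of the stack pointer selected by the
 * stack mask: a 16-bit stack segment (sp_mask == 0xffff) leaves the
 * upper bits of ESP/RSP untouched, while a 32-bit stack zero-extends
 * the value into RSP on x86-64, as the architecture requires.
 */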
563 /* in 64-bit machines, this can overflow. So this segment addition macro
564 * can be used to trim the value to 32-bit whenever needed */
565 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
567 /* XXX: add a is_user flag to have proper security support */
568 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
570 sp -= 2; \
571 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
574 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
576 sp -= 4; \
577 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
580 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
582 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
583 sp += 2; \
586 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
588 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
589 sp += 4; \
592 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
593 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
594 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
595 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
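/*
 * The push/pop macros operate on an explicit (ssp, sp, sp_mask) triple
 * rather than on ESP directly: callers work on a local copy of the
 * stack pointer and only commit it with SET_ESP once the whole frame
 * has been written, so a fault in the middle leaves ESP untouched.
 * The _RA variants thread a host return address through to the memory
 * accessors for precise exception unwinding.  A minimal usage sketch
 * follows (illustration only; next_eip stands in for the caller's
 * return address):
 */
#if 0
{
    uint32_t sp = env->regs[R_ESP];
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    target_ulong ssp = env->segs[R_SS].base;

    PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
    PUSHL(ssp, sp, sp_mask, next_eip);
    SET_ESP(sp, sp_mask);       /* commit the updated stack pointer */
}
#endif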
597 /* protected mode interrupt */
598 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
599 int error_code, unsigned int next_eip,
600 int is_hw)
602 SegmentCache *dt;
603 target_ulong ptr, ssp;
604 int type, dpl, selector, ss_dpl, cpl;
605 int has_error_code, new_stack, shift;
606 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
607 uint32_t old_eip, sp_mask;
608 int vm86 = env->eflags & VM_MASK;
610 has_error_code = 0;
611 if (!is_int && !is_hw) {
612 has_error_code = exception_has_error_code(intno);
614 if (is_int) {
615 old_eip = next_eip;
616 } else {
617 old_eip = env->eip;
620 dt = &env->idt;
621 if (intno * 8 + 7 > dt->limit) {
622 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
624 ptr = dt->base + intno * 8;
625 e1 = cpu_ldl_kernel(env, ptr);
626 e2 = cpu_ldl_kernel(env, ptr + 4);
627 /* check gate type */
628 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
629 switch (type) {
630 case 5: /* task gate */
631 /* must do that check here to return the correct error code */
632 if (!(e2 & DESC_P_MASK)) {
633 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
635 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
636 if (has_error_code) {
637 int type;
638 uint32_t mask;
640 /* push the error code */
641 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
642 shift = type >> 3;
643 if (env->segs[R_SS].flags & DESC_B_MASK) {
644 mask = 0xffffffff;
645 } else {
646 mask = 0xffff;
648 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
649 ssp = env->segs[R_SS].base + esp;
650 if (shift) {
651 cpu_stl_kernel(env, ssp, error_code);
652 } else {
653 cpu_stw_kernel(env, ssp, error_code);
655 SET_ESP(esp, mask);
657 return;
658 case 6: /* 286 interrupt gate */
659 case 7: /* 286 trap gate */
660 case 14: /* 386 interrupt gate */
661 case 15: /* 386 trap gate */
662 break;
663 default:
664 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
665 break;
667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
668 cpl = env->hflags & HF_CPL_MASK;
669 /* check privilege if software int */
670 if (is_int && dpl < cpl) {
671 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
673 /* check valid bit */
674 if (!(e2 & DESC_P_MASK)) {
675 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
677 selector = e1 >> 16;
678 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
679 if ((selector & 0xfffc) == 0) {
680 raise_exception_err(env, EXCP0D_GPF, 0);
682 if (load_segment(env, &e1, &e2, selector) != 0) {
683 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
685 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
689 if (dpl > cpl) {
690 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
692 if (!(e2 & DESC_P_MASK)) {
693 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
695 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
696 /* to inner privilege */
697 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
698 if ((ss & 0xfffc) == 0) {
699 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
701 if ((ss & 3) != dpl) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
708 if (ss_dpl != dpl) {
709 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
711 if (!(ss_e2 & DESC_S_MASK) ||
712 (ss_e2 & DESC_CS_MASK) ||
713 !(ss_e2 & DESC_W_MASK)) {
714 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
716 if (!(ss_e2 & DESC_P_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 new_stack = 1;
720 sp_mask = get_sp_mask(ss_e2);
721 ssp = get_seg_base(ss_e1, ss_e2);
722 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
723 /* to same privilege */
724 if (vm86) {
725 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
727 new_stack = 0;
728 sp_mask = get_sp_mask(env->segs[R_SS].flags);
729 ssp = env->segs[R_SS].base;
730 esp = env->regs[R_ESP];
731 dpl = cpl;
732 } else {
733 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
734 new_stack = 0; /* avoid warning */
735 sp_mask = 0; /* avoid warning */
736 ssp = 0; /* avoid warning */
737 esp = 0; /* avoid warning */
740 shift = type >> 3;
742 #if 0
743 /* XXX: check that enough room is available */
744 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
745 if (vm86) {
746 push_size += 8;
748 push_size <<= shift;
749 #endif
750 if (shift == 1) {
751 if (new_stack) {
752 if (vm86) {
753 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
756 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
759 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
761 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
762 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
763 PUSHL(ssp, esp, sp_mask, old_eip);
764 if (has_error_code) {
765 PUSHL(ssp, esp, sp_mask, error_code);
767 } else {
768 if (new_stack) {
769 if (vm86) {
770 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
773 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
776 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
778 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
779 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
780 PUSHW(ssp, esp, sp_mask, old_eip);
781 if (has_error_code) {
782 PUSHW(ssp, esp, sp_mask, error_code);
786 /* interrupt gates clear the IF flag */
787 if ((type & 1) == 0) {
788 env->eflags &= ~IF_MASK;
790 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
792 if (new_stack) {
793 if (vm86) {
794 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
797 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
799 ss = (ss & ~3) | dpl;
800 cpu_x86_load_seg_cache(env, R_SS, ss,
801 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
803 SET_ESP(esp, sp_mask);
805 selector = (selector & ~3) | dpl;
806 cpu_x86_load_seg_cache(env, R_CS, selector,
807 get_seg_base(e1, e2),
808 get_seg_limit(e1, e2),
809 e2);
810 env->eip = offset;
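/*
 * Frame pushed above for a protected-mode interrupt or trap gate, from
 * higher to lower addresses: GS/FS/DS/ES (vm86 only), SS and ESP (only
 * when switching to an inner stack), then EFLAGS, CS, EIP and, for the
 * exceptions that define one, the error code.  The field width is 16
 * or 32 bits depending on the gate type (shift).
 */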
813 #ifdef TARGET_X86_64
815 #define PUSHQ_RA(sp, val, ra) \
817 sp -= 8; \
818 cpu_stq_kernel_ra(env, sp, (val), ra); \
821 #define POPQ_RA(sp, val, ra) \
823 val = cpu_ldq_kernel_ra(env, sp, ra); \
824 sp += 8; \
827 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
828 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
830 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
832 X86CPU *cpu = x86_env_get_cpu(env);
833 int index;
835 #if 0
836 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
837 env->tr.base, env->tr.limit);
838 #endif
840 if (!(env->tr.flags & DESC_P_MASK)) {
841 cpu_abort(CPU(cpu), "invalid tss");
843 index = 8 * level + 4;
844 if ((index + 7) > env->tr.limit) {
845 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
847 return cpu_ldq_kernel(env, env->tr.base + index);
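/*
 * In the 64-bit TSS the stack pointers are 8-byte fields starting at
 * offset 4: RSP0..RSP2 for privilege levels 0-2, then IST1..IST7 from
 * offset 36 onwards, hence index = 8 * level + 4 with level = ist + 3
 * for IST entries.
 */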
850 /* 64 bit interrupt */
851 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
852 int error_code, target_ulong next_eip, int is_hw)
854 SegmentCache *dt;
855 target_ulong ptr;
856 int type, dpl, selector, cpl, ist;
857 int has_error_code, new_stack;
858 uint32_t e1, e2, e3, ss;
859 target_ulong old_eip, esp, offset;
861 has_error_code = 0;
862 if (!is_int && !is_hw) {
863 has_error_code = exception_has_error_code(intno);
865 if (is_int) {
866 old_eip = next_eip;
867 } else {
868 old_eip = env->eip;
871 dt = &env->idt;
872 if (intno * 16 + 15 > dt->limit) {
873 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
875 ptr = dt->base + intno * 16;
876 e1 = cpu_ldl_kernel(env, ptr);
877 e2 = cpu_ldl_kernel(env, ptr + 4);
878 e3 = cpu_ldl_kernel(env, ptr + 8);
879 /* check gate type */
880 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
881 switch (type) {
882 case 14: /* 386 interrupt gate */
883 case 15: /* 386 trap gate */
884 break;
885 default:
886 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
887 break;
889 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
890 cpl = env->hflags & HF_CPL_MASK;
891 /* check privilege if software int */
892 if (is_int && dpl < cpl) {
893 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
895 /* check valid bit */
896 if (!(e2 & DESC_P_MASK)) {
897 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
899 selector = e1 >> 16;
900 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 ist = e2 & 7;
902 if ((selector & 0xfffc) == 0) {
903 raise_exception_err(env, EXCP0D_GPF, 0);
906 if (load_segment(env, &e1, &e2, selector) != 0) {
907 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
913 if (dpl > cpl) {
914 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
916 if (!(e2 & DESC_P_MASK)) {
917 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
919 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
920 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
922 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
923 /* to inner privilege */
924 new_stack = 1;
925 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
926 ss = 0;
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK) {
930 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
932 new_stack = 0;
933 esp = env->regs[R_ESP];
934 dpl = cpl;
935 } else {
936 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
937 new_stack = 0; /* avoid warning */
938 esp = 0; /* avoid warning */
940 esp &= ~0xfLL; /* align stack */
942 PUSHQ(esp, env->segs[R_SS].selector);
943 PUSHQ(esp, env->regs[R_ESP]);
944 PUSHQ(esp, cpu_compute_eflags(env));
945 PUSHQ(esp, env->segs[R_CS].selector);
946 PUSHQ(esp, old_eip);
947 if (has_error_code) {
948 PUSHQ(esp, error_code);
951 /* interrupt gates clear the IF flag */
952 if ((type & 1) == 0) {
953 env->eflags &= ~IF_MASK;
955 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
957 if (new_stack) {
958 ss = 0 | dpl;
959 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
961 env->regs[R_ESP] = esp;
963 selector = (selector & ~3) | dpl;
964 cpu_x86_load_seg_cache(env, R_CS, selector,
965 get_seg_base(e1, e2),
966 get_seg_limit(e1, e2),
967 e2);
968 env->eip = offset;
970 #endif
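/*
 * In 64-bit mode the interrupt frame always has the same shape: SS,
 * RSP, RFLAGS, CS and RIP (plus an optional error code) are pushed as
 * 8-byte values after aligning RSP down to 16 bytes, and a non-zero
 * IST index in the gate forces a stack switch even without a privilege
 * change.
 */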
972 #ifdef TARGET_X86_64
973 #if defined(CONFIG_USER_ONLY)
974 void helper_syscall(CPUX86State *env, int next_eip_addend)
976 CPUState *cs = CPU(x86_env_get_cpu(env));
978 cs->exception_index = EXCP_SYSCALL;
979 env->exception_next_eip = env->eip + next_eip_addend;
980 cpu_loop_exit(cs);
982 #else
983 void helper_syscall(CPUX86State *env, int next_eip_addend)
985 int selector;
987 if (!(env->efer & MSR_EFER_SCE)) {
988 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
990 selector = (env->star >> 32) & 0xffff;
991 if (env->hflags & HF_LMA_MASK) {
992 int code64;
994 env->regs[R_ECX] = env->eip + next_eip_addend;
995 env->regs[11] = cpu_compute_eflags(env);
997 code64 = env->hflags & HF_CS64_MASK;
999 env->eflags &= ~env->fmask;
1000 cpu_load_eflags(env, env->eflags, 0);
1001 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1002 0, 0xffffffff,
1003 DESC_G_MASK | DESC_P_MASK |
1004 DESC_S_MASK |
1005 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1006 DESC_L_MASK);
1007 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1008 0, 0xffffffff,
1009 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1010 DESC_S_MASK |
1011 DESC_W_MASK | DESC_A_MASK);
1012 if (code64) {
1013 env->eip = env->lstar;
1014 } else {
1015 env->eip = env->cstar;
1017 } else {
1018 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1020 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1021 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1022 0, 0xffffffff,
1023 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1024 DESC_S_MASK |
1025 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1026 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1027 0, 0xffffffff,
1028 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1029 DESC_S_MASK |
1030 DESC_W_MASK | DESC_A_MASK);
1031 env->eip = (uint32_t)env->star;
1034 #endif
1035 #endif
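/*
 * SYSCALL summary: the new CS selector comes from STAR[47:32] and SS
 * is that value + 8.  In long mode, RCX and R11 receive the return RIP
 * and RFLAGS, RFLAGS is masked with the FMASK MSR, and RIP is loaded
 * from LSTAR (64-bit caller) or CSTAR (compatibility mode); outside
 * long mode the target is the low 32 bits of STAR and only IF, VM and
 * RF are cleared.
 */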
1037 #ifdef TARGET_X86_64
1038 void helper_sysret(CPUX86State *env, int dflag)
1040 int cpl, selector;
1042 if (!(env->efer & MSR_EFER_SCE)) {
1043 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1045 cpl = env->hflags & HF_CPL_MASK;
1046 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1047 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1049 selector = (env->star >> 48) & 0xffff;
1050 if (env->hflags & HF_LMA_MASK) {
1051 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1052 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1053 NT_MASK);
1054 if (dflag == 2) {
1055 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1056 0, 0xffffffff,
1057 DESC_G_MASK | DESC_P_MASK |
1058 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1059 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1060 DESC_L_MASK);
1061 env->eip = env->regs[R_ECX];
1062 } else {
1063 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1064 0, 0xffffffff,
1065 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1066 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1067 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1068 env->eip = (uint32_t)env->regs[R_ECX];
1070 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1071 0, 0xffffffff,
1072 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074 DESC_W_MASK | DESC_A_MASK);
1075 } else {
1076 env->eflags |= IF_MASK;
1077 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1078 0, 0xffffffff,
1079 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1082 env->eip = (uint32_t)env->regs[R_ECX];
1083 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1084 0, 0xffffffff,
1085 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1086 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1087 DESC_W_MASK | DESC_A_MASK);
1090 #endif
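/*
 * SYSRET summary: the return CS selector comes from STAR[63:48]
 * (plus 16 when returning to 64-bit code, dflag == 2), SS is
 * STAR[63:48] + 8, and both are forced to RPL 3.  In long mode RFLAGS
 * is restored from R11 and RIP from RCX; outside long mode only IF is
 * set and the low 32 bits of RCX become EIP.
 */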
1092 /* real mode interrupt */
1093 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1094 int error_code, unsigned int next_eip)
1096 SegmentCache *dt;
1097 target_ulong ptr, ssp;
1098 int selector;
1099 uint32_t offset, esp;
1100 uint32_t old_cs, old_eip;
1102 /* real mode (simpler!) */
1103 dt = &env->idt;
1104 if (intno * 4 + 3 > dt->limit) {
1105 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1107 ptr = dt->base + intno * 4;
1108 offset = cpu_lduw_kernel(env, ptr);
1109 selector = cpu_lduw_kernel(env, ptr + 2);
1110 esp = env->regs[R_ESP];
1111 ssp = env->segs[R_SS].base;
1112 if (is_int) {
1113 old_eip = next_eip;
1114 } else {
1115 old_eip = env->eip;
1117 old_cs = env->segs[R_CS].selector;
1118 /* XXX: use SS segment size? */
1119 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1120 PUSHW(ssp, esp, 0xffff, old_cs);
1121 PUSHW(ssp, esp, 0xffff, old_eip);
1123 /* update processor state */
1124 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1125 env->eip = offset;
1126 env->segs[R_CS].selector = selector;
1127 env->segs[R_CS].base = (selector << 4);
1128 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
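/*
 * Real-mode delivery reads the 4-byte IVT entry (offset, then segment)
 * at base + intno * 4, pushes the 16-bit FLAGS, CS and IP, and clears
 * IF, TF, AC and RF before jumping to the handler.
 */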
1131 #if defined(CONFIG_USER_ONLY)
1132 /* fake user mode interrupt. is_int is TRUE if coming from the int
1133  * instruction. next_eip is the env->eip value AFTER the interrupt
1134  * instruction. It is only relevant if is_int is TRUE or if intno
1135  * is EXCP_SYSCALL.
1136  */
1137 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1138 int error_code, target_ulong next_eip)
1140 if (is_int) {
1141 SegmentCache *dt;
1142 target_ulong ptr;
1143 int dpl, cpl, shift;
1144 uint32_t e2;
1146 dt = &env->idt;
1147 if (env->hflags & HF_LMA_MASK) {
1148 shift = 4;
1149 } else {
1150 shift = 3;
1152 ptr = dt->base + (intno << shift);
1153 e2 = cpu_ldl_kernel(env, ptr + 4);
1155 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1156 cpl = env->hflags & HF_CPL_MASK;
1157 /* check privilege if software int */
1158 if (dpl < cpl) {
1159 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1163 /* Since we emulate only user space, we cannot do more than
1164 exiting the emulation with the suitable exception and error
1165 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1166 if (is_int || intno == EXCP_SYSCALL) {
1167 env->eip = next_eip;
1171 #else
1173 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1174 int error_code, int is_hw, int rm)
1176 CPUState *cs = CPU(x86_env_get_cpu(env));
1177 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1178 control.event_inj));
1180 if (!(event_inj & SVM_EVTINJ_VALID)) {
1181 int type;
1183 if (is_int) {
1184 type = SVM_EVTINJ_TYPE_SOFT;
1185 } else {
1186 type = SVM_EVTINJ_TYPE_EXEPT;
1188 event_inj = intno | type | SVM_EVTINJ_VALID;
1189 if (!rm && exception_has_error_code(intno)) {
1190 event_inj |= SVM_EVTINJ_VALID_ERR;
1191 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1192 control.event_inj_err),
1193 error_code);
1195 x86_stl_phys(cs,
1196 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1197 event_inj);
1200 #endif
1202 /*
1203  * Begin execution of an interruption. is_int is TRUE if coming from
1204  * the int instruction. next_eip is the env->eip value AFTER the interrupt
1205  * instruction. It is only relevant if is_int is TRUE.
1206  */
1207 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1208 int error_code, target_ulong next_eip, int is_hw)
1210 CPUX86State *env = &cpu->env;
1212 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1213 if ((env->cr[0] & CR0_PE_MASK)) {
1214 static int count;
1216 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1217 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1218 count, intno, error_code, is_int,
1219 env->hflags & HF_CPL_MASK,
1220 env->segs[R_CS].selector, env->eip,
1221 (int)env->segs[R_CS].base + env->eip,
1222 env->segs[R_SS].selector, env->regs[R_ESP]);
1223 if (intno == 0x0e) {
1224 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1225 } else {
1226 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1228 qemu_log("\n");
1229 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1230 #if 0
1232 int i;
1233 target_ulong ptr;
1235 qemu_log(" code=");
1236 ptr = env->segs[R_CS].base + env->eip;
1237 for (i = 0; i < 16; i++) {
1238 qemu_log(" %02x", ldub(ptr + i));
1240 qemu_log("\n");
1242 #endif
1243 count++;
1246 if (env->cr[0] & CR0_PE_MASK) {
1247 #if !defined(CONFIG_USER_ONLY)
1248 if (env->hflags & HF_SVMI_MASK) {
1249 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1251 #endif
1252 #ifdef TARGET_X86_64
1253 if (env->hflags & HF_LMA_MASK) {
1254 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1255 } else
1256 #endif
1258 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1259 is_hw);
1261 } else {
1262 #if !defined(CONFIG_USER_ONLY)
1263 if (env->hflags & HF_SVMI_MASK) {
1264 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1266 #endif
1267 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1270 #if !defined(CONFIG_USER_ONLY)
1271 if (env->hflags & HF_SVMI_MASK) {
1272 CPUState *cs = CPU(cpu);
1273 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1274 offsetof(struct vmcb,
1275 control.event_inj));
1277 x86_stl_phys(cs,
1278 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1279 event_inj & ~SVM_EVTINJ_VALID);
1281 #endif
1284 void x86_cpu_do_interrupt(CPUState *cs)
1286 X86CPU *cpu = X86_CPU(cs);
1287 CPUX86State *env = &cpu->env;
1289 #if defined(CONFIG_USER_ONLY)
1290 /* if user mode only, we simulate a fake exception
1291 which will be handled outside the cpu execution
1292 loop */
1293 do_interrupt_user(env, cs->exception_index,
1294 env->exception_is_int,
1295 env->error_code,
1296 env->exception_next_eip);
1297 /* successfully delivered */
1298 env->old_exception = -1;
1299 #else
1300 if (cs->exception_index >= EXCP_VMEXIT) {
1301 assert(env->old_exception == -1);
1302 do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
1303 } else {
1304 do_interrupt_all(cpu, cs->exception_index,
1305 env->exception_is_int,
1306 env->error_code,
1307 env->exception_next_eip, 0);
1308 /* successfully delivered */
1309 env->old_exception = -1;
1311 #endif
1314 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1316 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1319 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1321 X86CPU *cpu = X86_CPU(cs);
1322 CPUX86State *env = &cpu->env;
1323 bool ret = false;
1325 #if !defined(CONFIG_USER_ONLY)
1326 if (interrupt_request & CPU_INTERRUPT_POLL) {
1327 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1328 apic_poll_irq(cpu->apic_state);
1329 /* Don't process multiple interrupt requests in a single call.
1330 This is required to make icount-driven execution deterministic. */
1331 return true;
1333 #endif
1334 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1335 do_cpu_sipi(cpu);
1336 ret = true;
1337 } else if (env->hflags2 & HF2_GIF_MASK) {
1338 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1339 !(env->hflags & HF_SMM_MASK)) {
1340 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1341 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1342 do_smm_enter(cpu);
1343 ret = true;
1344 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1345 !(env->hflags2 & HF2_NMI_MASK)) {
1346 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1347 env->hflags2 |= HF2_NMI_MASK;
1348 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1349 ret = true;
1350 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1351 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1352 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1353 ret = true;
1354 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1355 (((env->hflags2 & HF2_VINTR_MASK) &&
1356 (env->hflags2 & HF2_HIF_MASK)) ||
1357 (!(env->hflags2 & HF2_VINTR_MASK) &&
1358 (env->eflags & IF_MASK &&
1359 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1360 int intno;
1361 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1362 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1363 CPU_INTERRUPT_VIRQ);
1364 intno = cpu_get_pic_interrupt(env);
1365 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1366 "Servicing hardware INT=0x%02x\n", intno);
1367 do_interrupt_x86_hardirq(env, intno, 1);
1368 /* ensure that no TB jump will be modified as
1369 the program flow was changed */
1370 ret = true;
1371 #if !defined(CONFIG_USER_ONLY)
1372 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1373 (env->eflags & IF_MASK) &&
1374 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1375 int intno;
1376 /* FIXME: this should respect TPR */
1377 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1378 intno = x86_ldl_phys(cs, env->vm_vmcb
1379 + offsetof(struct vmcb, control.int_vector));
1380 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1381 "Servicing virtual hardware INT=0x%02x\n", intno);
1382 do_interrupt_x86_hardirq(env, intno, 1);
1383 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1384 ret = true;
1385 #endif
1389 return ret;
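/*
 * The order of the checks above sets the delivery priority: SIPI,
 * then SMI, NMI, machine check, maskable external interrupts (subject
 * to GIF, IF, the interrupt shadow and SVM virtual-interrupt masking)
 * and finally SVM virtual interrupts.
 */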
1392 void helper_lldt(CPUX86State *env, int selector)
1394 SegmentCache *dt;
1395 uint32_t e1, e2;
1396 int index, entry_limit;
1397 target_ulong ptr;
1399 selector &= 0xffff;
1400 if ((selector & 0xfffc) == 0) {
1401 /* XXX: NULL selector case: invalid LDT */
1402 env->ldt.base = 0;
1403 env->ldt.limit = 0;
1404 } else {
1405 if (selector & 0x4) {
1406 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1408 dt = &env->gdt;
1409 index = selector & ~7;
1410 #ifdef TARGET_X86_64
1411 if (env->hflags & HF_LMA_MASK) {
1412 entry_limit = 15;
1413 } else
1414 #endif
1416 entry_limit = 7;
1418 if ((index + entry_limit) > dt->limit) {
1419 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1421 ptr = dt->base + index;
1422 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1423 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1424 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1425 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1427 if (!(e2 & DESC_P_MASK)) {
1428 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1430 #ifdef TARGET_X86_64
1431 if (env->hflags & HF_LMA_MASK) {
1432 uint32_t e3;
1434 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1435 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1436 env->ldt.base |= (target_ulong)e3 << 32;
1437 } else
1438 #endif
1440 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1443 env->ldt.selector = selector;
1446 void helper_ltr(CPUX86State *env, int selector)
1448 SegmentCache *dt;
1449 uint32_t e1, e2;
1450 int index, type, entry_limit;
1451 target_ulong ptr;
1453 selector &= 0xffff;
1454 if ((selector & 0xfffc) == 0) {
1455 /* NULL selector case: invalid TR */
1456 env->tr.base = 0;
1457 env->tr.limit = 0;
1458 env->tr.flags = 0;
1459 } else {
1460 if (selector & 0x4) {
1461 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1463 dt = &env->gdt;
1464 index = selector & ~7;
1465 #ifdef TARGET_X86_64
1466 if (env->hflags & HF_LMA_MASK) {
1467 entry_limit = 15;
1468 } else
1469 #endif
1471 entry_limit = 7;
1473 if ((index + entry_limit) > dt->limit) {
1474 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1476 ptr = dt->base + index;
1477 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1478 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1479 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1480 if ((e2 & DESC_S_MASK) ||
1481 (type != 1 && type != 9)) {
1482 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1484 if (!(e2 & DESC_P_MASK)) {
1485 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1487 #ifdef TARGET_X86_64
1488 if (env->hflags & HF_LMA_MASK) {
1489 uint32_t e3, e4;
1491 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1492 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1493 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1494 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1496 load_seg_cache_raw_dt(&env->tr, e1, e2);
1497 env->tr.base |= (target_ulong)e3 << 32;
1498 } else
1499 #endif
1501 load_seg_cache_raw_dt(&env->tr, e1, e2);
1503 e2 |= DESC_TSS_BUSY_MASK;
1504 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1506 env->tr.selector = selector;
1509 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1510 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1512 uint32_t e1, e2;
1513 int cpl, dpl, rpl;
1514 SegmentCache *dt;
1515 int index;
1516 target_ulong ptr;
1518 selector &= 0xffff;
1519 cpl = env->hflags & HF_CPL_MASK;
1520 if ((selector & 0xfffc) == 0) {
1521 /* null selector case */
1522 if (seg_reg == R_SS
1523 #ifdef TARGET_X86_64
1524 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1525 #endif
1527 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1529 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1530 } else {
1532 if (selector & 0x4) {
1533 dt = &env->ldt;
1534 } else {
1535 dt = &env->gdt;
1537 index = selector & ~7;
1538 if ((index + 7) > dt->limit) {
1539 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1541 ptr = dt->base + index;
1542 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1543 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1545 if (!(e2 & DESC_S_MASK)) {
1546 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1548 rpl = selector & 3;
1549 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1550 if (seg_reg == R_SS) {
1551 /* must be writable segment */
1552 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1553 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1555 if (rpl != cpl || dpl != cpl) {
1556 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1558 } else {
1559 /* must be readable segment */
1560 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1561 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1564 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1565 /* if not conforming code, test rights */
1566 if (dpl < cpl || dpl < rpl) {
1567 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1572 if (!(e2 & DESC_P_MASK)) {
1573 if (seg_reg == R_SS) {
1574 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1575 } else {
1576 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1580 /* set the access bit if not already set */
1581 if (!(e2 & DESC_A_MASK)) {
1582 e2 |= DESC_A_MASK;
1583 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1586 cpu_x86_load_seg_cache(env, seg_reg, selector,
1587 get_seg_base(e1, e2),
1588 get_seg_limit(e1, e2),
1589 e2);
1590 #if 0
1591 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1592 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1593 #endif
1597 /* protected mode jump */
1598 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1599 target_ulong next_eip)
1601 int gate_cs, type;
1602 uint32_t e1, e2, cpl, dpl, rpl, limit;
1604 if ((new_cs & 0xfffc) == 0) {
1605 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1607 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1608 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1610 cpl = env->hflags & HF_CPL_MASK;
1611 if (e2 & DESC_S_MASK) {
1612 if (!(e2 & DESC_CS_MASK)) {
1613 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1615 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1616 if (e2 & DESC_C_MASK) {
1617 /* conforming code segment */
1618 if (dpl > cpl) {
1619 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1621 } else {
1622 /* non conforming code segment */
1623 rpl = new_cs & 3;
1624 if (rpl > cpl) {
1625 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1627 if (dpl != cpl) {
1628 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1631 if (!(e2 & DESC_P_MASK)) {
1632 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1634 limit = get_seg_limit(e1, e2);
1635 if (new_eip > limit &&
1636 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1637 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1639 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1640 get_seg_base(e1, e2), limit, e2);
1641 env->eip = new_eip;
1642 } else {
1643 /* jump to call or task gate */
1644 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1645 rpl = new_cs & 3;
1646 cpl = env->hflags & HF_CPL_MASK;
1647 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1648 switch (type) {
1649 case 1: /* 286 TSS */
1650 case 9: /* 386 TSS */
1651 case 5: /* task gate */
1652 if (dpl < cpl || dpl < rpl) {
1653 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1655 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1656 break;
1657 case 4: /* 286 call gate */
1658 case 12: /* 386 call gate */
1659 if ((dpl < cpl) || (dpl < rpl)) {
1660 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1662 if (!(e2 & DESC_P_MASK)) {
1663 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1665 gate_cs = e1 >> 16;
1666 new_eip = (e1 & 0xffff);
1667 if (type == 12) {
1668 new_eip |= (e2 & 0xffff0000);
1670 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1671 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1673 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1674 /* must be code segment */
1675 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1676 (DESC_S_MASK | DESC_CS_MASK))) {
1677 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1679 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1680 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1681 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1683 if (!(e2 & DESC_P_MASK)) {
1684 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1686 limit = get_seg_limit(e1, e2);
1687 if (new_eip > limit) {
1688 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1690 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1691 get_seg_base(e1, e2), limit, e2);
1692 env->eip = new_eip;
1693 break;
1694 default:
1695 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1696 break;
1701 /* real mode call */
1702 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1703 int shift, int next_eip)
1705 int new_eip;
1706 uint32_t esp, esp_mask;
1707 target_ulong ssp;
1709 new_eip = new_eip1;
1710 esp = env->regs[R_ESP];
1711 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1712 ssp = env->segs[R_SS].base;
1713 if (shift) {
1714 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1715 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1716 } else {
1717 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1718 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1721 SET_ESP(esp, esp_mask);
1722 env->eip = new_eip;
1723 env->segs[R_CS].selector = new_cs;
1724 env->segs[R_CS].base = (new_cs << 4);
1727 /* protected mode call */
1728 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1729 int shift, target_ulong next_eip)
1731 int new_stack, i;
1732 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1733 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1734 uint32_t val, limit, old_sp_mask;
1735 target_ulong ssp, old_ssp;
1737 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1738 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1739 if ((new_cs & 0xfffc) == 0) {
1740 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1742 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1743 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1745 cpl = env->hflags & HF_CPL_MASK;
1746 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1747 if (e2 & DESC_S_MASK) {
1748 if (!(e2 & DESC_CS_MASK)) {
1749 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1751 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1752 if (e2 & DESC_C_MASK) {
1753 /* conforming code segment */
1754 if (dpl > cpl) {
1755 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1757 } else {
1758 /* non conforming code segment */
1759 rpl = new_cs & 3;
1760 if (rpl > cpl) {
1761 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1763 if (dpl != cpl) {
1764 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1767 if (!(e2 & DESC_P_MASK)) {
1768 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1771 #ifdef TARGET_X86_64
1772 /* XXX: check 16/32 bit cases in long mode */
1773 if (shift == 2) {
1774 target_ulong rsp;
1776 /* 64 bit case */
1777 rsp = env->regs[R_ESP];
1778 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1779 PUSHQ_RA(rsp, next_eip, GETPC());
1780 /* from this point, not restartable */
1781 env->regs[R_ESP] = rsp;
1782 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1783 get_seg_base(e1, e2),
1784 get_seg_limit(e1, e2), e2);
1785 env->eip = new_eip;
1786 } else
1787 #endif
1789 sp = env->regs[R_ESP];
1790 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1791 ssp = env->segs[R_SS].base;
1792 if (shift) {
1793 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1794 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1795 } else {
1796 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1797 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1800 limit = get_seg_limit(e1, e2);
1801 if (new_eip > limit) {
1802 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1804 /* from this point, not restartable */
1805 SET_ESP(sp, sp_mask);
1806 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1807 get_seg_base(e1, e2), limit, e2);
1808 env->eip = new_eip;
1810 } else {
1811 /* check gate type */
1812 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1813 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1814 rpl = new_cs & 3;
1815 switch (type) {
1816 case 1: /* available 286 TSS */
1817 case 9: /* available 386 TSS */
1818 case 5: /* task gate */
1819 if (dpl < cpl || dpl < rpl) {
1820 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1822 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1823 return;
1824 case 4: /* 286 call gate */
1825 case 12: /* 386 call gate */
1826 break;
1827 default:
1828 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1829 break;
1831 shift = type >> 3;
1833 if (dpl < cpl || dpl < rpl) {
1834 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1836 /* check valid bit */
1837 if (!(e2 & DESC_P_MASK)) {
1838 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1840 selector = e1 >> 16;
1841 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1842 param_count = e2 & 0x1f;
1843 if ((selector & 0xfffc) == 0) {
1844 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1847 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1848 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1850 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1851 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1853 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1854 if (dpl > cpl) {
1855 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1857 if (!(e2 & DESC_P_MASK)) {
1858 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1861 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1862 /* to inner privilege */
1863 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1864 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1865 TARGET_FMT_lx "\n", ss, sp, param_count,
1866 env->regs[R_ESP]);
1867 if ((ss & 0xfffc) == 0) {
1868 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1870 if ((ss & 3) != dpl) {
1871 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1873 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1874 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1876 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1877 if (ss_dpl != dpl) {
1878 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1880 if (!(ss_e2 & DESC_S_MASK) ||
1881 (ss_e2 & DESC_CS_MASK) ||
1882 !(ss_e2 & DESC_W_MASK)) {
1883 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1885 if (!(ss_e2 & DESC_P_MASK)) {
1886 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1889 /* push_size = ((param_count * 2) + 8) << shift; */
1891 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1892 old_ssp = env->segs[R_SS].base;
1894 sp_mask = get_sp_mask(ss_e2);
1895 ssp = get_seg_base(ss_e1, ss_e2);
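/*
 * Copy the call parameters from the caller's (outer) stack to the new
 * inner stack: first the old SS:ESP, then param_count entries, read
 * with the old stack's base/mask and pushed with the new one.  The
 * 386 call gate copies dwords, the 286-style gate copies words.
 */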
1896 if (shift) {
1897 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1898 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1899 for (i = param_count - 1; i >= 0; i--) {
1900 val = cpu_ldl_kernel_ra(env, old_ssp +
1901 ((env->regs[R_ESP] + i * 4) &
1902 old_sp_mask), GETPC());
1903 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1905 } else {
1906 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1907 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1908 for (i = param_count - 1; i >= 0; i--) {
1909 val = cpu_lduw_kernel_ra(env, old_ssp +
1910 ((env->regs[R_ESP] + i * 2) &
1911 old_sp_mask), GETPC());
1912 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1915 new_stack = 1;
1916 } else {
1917 /* to same privilege */
1918 sp = env->regs[R_ESP];
1919 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1920 ssp = env->segs[R_SS].base;
1921 /* push_size = (4 << shift); */
1922 new_stack = 0;
1925 if (shift) {
1926 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1927 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1928 } else {
1929 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1930 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1933 /* from this point, not restartable */
1935 if (new_stack) {
1936 ss = (ss & ~3) | dpl;
1937 cpu_x86_load_seg_cache(env, R_SS, ss,
1938 ssp,
1939 get_seg_limit(ss_e1, ss_e2),
1940 ss_e2);
1943 selector = (selector & ~3) | dpl;
1944 cpu_x86_load_seg_cache(env, R_CS, selector,
1945 get_seg_base(e1, e2),
1946 get_seg_limit(e1, e2),
1947 e2);
1948 SET_ESP(sp, sp_mask);
1949 env->eip = offset;
1950 }
1951 }
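/*
 * helper_iret_real below handles IRET while the CPU is in real or vm86
 * mode: it pops IP, CS and FLAGS (16- or 32-bit wide depending on
 * shift) and reloads CS as a real-mode selector (base = selector << 4).
 * In vm86 mode IOPL is not writable by the guest, so it is left out of
 * the eflags update mask.
 */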
1953 /* real and vm86 mode iret */
1954 void helper_iret_real(CPUX86State *env, int shift)
1955 {
1956 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1957 target_ulong ssp;
1958 int eflags_mask;
1960 sp_mask = 0xffff; /* XXXX: use SS segment size? */
1961 sp = env->regs[R_ESP];
1962 ssp = env->segs[R_SS].base;
1963 if (shift == 1) {
1964 /* 32 bits */
1965 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1966 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1967 new_cs &= 0xffff;
1968 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1969 } else {
1970 /* 16 bits */
1971 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1972 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1973 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1975 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1976 env->segs[R_CS].selector = new_cs;
1977 env->segs[R_CS].base = (new_cs << 4);
1978 env->eip = new_eip;
1979 if (env->eflags & VM_MASK) {
1980 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1981 NT_MASK;
1982 } else {
1983 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1984 RF_MASK | NT_MASK;
1986 if (shift == 0) {
1987 eflags_mask &= 0xffff;
1989 cpu_load_eflags(env, new_eflags, eflags_mask);
1990 env->hflags2 &= ~HF2_NMI_MASK;
1991 }
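/*
 * validate_seg: after a privilege-lowering return, data segments and
 * non-conforming code segments whose DPL is below the new (outer) CPL
 * must be nulled so the less privileged code cannot keep using them;
 * conforming code segments are exempt, matching the DESC_CS_MASK /
 * DESC_C_MASK test below.
 */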
1993 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1994 {
1995 int dpl;
1996 uint32_t e2;
1998 /* XXX: on x86_64, we do not want to nullify FS and GS because
1999 they may still contain a valid base. I would be interested to
2000 know how a real x86_64 CPU behaves */
2001 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2002 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2003 return;
2006 e2 = env->segs[seg_reg].flags;
2007 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2008 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2009 /* data or non conforming code segment */
2010 if (dpl < cpl) {
2011 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2012 }
2013 }
2014 }
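/*
 * helper_ret_protected is the common tail for far RET and IRET in
 * protected mode.  shift selects the operand size (0 = 16-bit,
 * 1 = 32-bit, 2 = 64-bit), addend is the immediate of "ret imm16",
 * and is_iret additionally pops EFLAGS and handles the return to
 * vm86 mode.
 */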
2016 /* protected mode iret */
2017 static inline void helper_ret_protected(CPUX86State *env, int shift,
2018 int is_iret, int addend,
2019 uintptr_t retaddr)
2020 {
2021 uint32_t new_cs, new_eflags, new_ss;
2022 uint32_t new_es, new_ds, new_fs, new_gs;
2023 uint32_t e1, e2, ss_e1, ss_e2;
2024 int cpl, dpl, rpl, eflags_mask, iopl;
2025 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2027 #ifdef TARGET_X86_64
2028 if (shift == 2) {
2029 sp_mask = -1;
2030 } else
2031 #endif
2032 {
2033 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2034 }
2035 sp = env->regs[R_ESP];
2036 ssp = env->segs[R_SS].base;
2037 new_eflags = 0; /* avoid warning */
2038 #ifdef TARGET_X86_64
2039 if (shift == 2) {
2040 POPQ_RA(sp, new_eip, retaddr);
2041 POPQ_RA(sp, new_cs, retaddr);
2042 new_cs &= 0xffff;
2043 if (is_iret) {
2044 POPQ_RA(sp, new_eflags, retaddr);
2046 } else
2047 #endif
2048 {
2049 if (shift == 1) {
2050 /* 32 bits */
2051 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2052 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2053 new_cs &= 0xffff;
2054 if (is_iret) {
2055 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2056 if (new_eflags & VM_MASK) {
2057 goto return_to_vm86;
2060 } else {
2061 /* 16 bits */
2062 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2063 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2064 if (is_iret) {
2065 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2069 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2070 new_cs, new_eip, shift, addend);
2071 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2072 if ((new_cs & 0xfffc) == 0) {
2073 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2075 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2076 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2078 if (!(e2 & DESC_S_MASK) ||
2079 !(e2 & DESC_CS_MASK)) {
2080 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2082 cpl = env->hflags & HF_CPL_MASK;
2083 rpl = new_cs & 3;
2084 if (rpl < cpl) {
2085 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2087 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2088 if (e2 & DESC_C_MASK) {
2089 if (dpl > rpl) {
2090 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2092 } else {
2093 if (dpl != rpl) {
2094 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2097 if (!(e2 & DESC_P_MASK)) {
2098 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2101 sp += addend;
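/*
 * Two cases from here: a return that stays at the same privilege level
 * only reloads CS; otherwise SS:ESP are also popped from the old stack
 * and ES/DS/FS/GS are re-validated against the new, less privileged
 * CPL.  In 64-bit mode IRET always pops SS:RSP, so it takes the second
 * path even when rpl == cpl.
 */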
2102 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2103 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2104 /* return to same privilege level */
2105 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2106 get_seg_base(e1, e2),
2107 get_seg_limit(e1, e2),
2108 e2);
2109 } else {
2110 /* return to different privilege level */
2111 #ifdef TARGET_X86_64
2112 if (shift == 2) {
2113 POPQ_RA(sp, new_esp, retaddr);
2114 POPQ_RA(sp, new_ss, retaddr);
2115 new_ss &= 0xffff;
2116 } else
2117 #endif
2118 {
2119 if (shift == 1) {
2120 /* 32 bits */
2121 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2122 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2123 new_ss &= 0xffff;
2124 } else {
2125 /* 16 bits */
2126 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2127 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2130 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2131 new_ss, new_esp);
2132 if ((new_ss & 0xfffc) == 0) {
2133 #ifdef TARGET_X86_64
2134 /* NULL ss is allowed in long mode if cpl != 3 */
2135 /* XXX: test CS64? */
2136 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2137 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2138 0, 0xffffffff,
2139 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2140 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2141 DESC_W_MASK | DESC_A_MASK);
2142 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2143 } else
2144 #endif
2145 {
2146 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2147 }
2148 } else {
2149 if ((new_ss & 3) != rpl) {
2150 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2152 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2153 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2155 if (!(ss_e2 & DESC_S_MASK) ||
2156 (ss_e2 & DESC_CS_MASK) ||
2157 !(ss_e2 & DESC_W_MASK)) {
2158 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2160 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2161 if (dpl != rpl) {
2162 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2164 if (!(ss_e2 & DESC_P_MASK)) {
2165 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2167 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2168 get_seg_base(ss_e1, ss_e2),
2169 get_seg_limit(ss_e1, ss_e2),
2170 ss_e2);
2173 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2174 get_seg_base(e1, e2),
2175 get_seg_limit(e1, e2),
2176 e2);
2177 sp = new_esp;
2178 #ifdef TARGET_X86_64
2179 if (env->hflags & HF_CS64_MASK) {
2180 sp_mask = -1;
2181 } else
2182 #endif
2183 {
2184 sp_mask = get_sp_mask(ss_e2);
2185 }
2187 /* validate data segments */
2188 validate_seg(env, R_ES, rpl);
2189 validate_seg(env, R_DS, rpl);
2190 validate_seg(env, R_FS, rpl);
2191 validate_seg(env, R_GS, rpl);
2193 sp += addend;
2195 SET_ESP(sp, sp_mask);
2196 env->eip = new_eip;
2197 if (is_iret) {
2198 /* NOTE: 'cpl' is the _old_ CPL */
2199 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2200 if (cpl == 0) {
2201 eflags_mask |= IOPL_MASK;
2203 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2204 if (cpl <= iopl) {
2205 eflags_mask |= IF_MASK;
2207 if (shift == 0) {
2208 eflags_mask &= 0xffff;
2210 cpu_load_eflags(env, new_eflags, eflags_mask);
2212 return;
2214 return_to_vm86:
2215 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2216 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2217 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2218 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2219 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2220 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2222 /* modify processor state */
2223 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2224 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2225 VIP_MASK);
2226 load_seg_vm(env, R_CS, new_cs & 0xffff);
2227 load_seg_vm(env, R_SS, new_ss & 0xffff);
2228 load_seg_vm(env, R_ES, new_es & 0xffff);
2229 load_seg_vm(env, R_DS, new_ds & 0xffff);
2230 load_seg_vm(env, R_FS, new_fs & 0xffff);
2231 load_seg_vm(env, R_GS, new_gs & 0xffff);
2233 env->eip = new_eip & 0xffff;
2234 env->regs[R_ESP] = new_esp;
2235 }
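/*
 * helper_iret_protected: if NT is set the IRET is a task return and
 * unwinds through the back link stored at offset 0 of the current TSS;
 * otherwise it goes through the common protected-mode return path
 * above.  Either way the NMI-blocked state is cleared afterwards.
 */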
2237 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2238 {
2239 int tss_selector, type;
2240 uint32_t e1, e2;
2242 /* specific case for TSS */
2243 if (env->eflags & NT_MASK) {
2244 #ifdef TARGET_X86_64
2245 if (env->hflags & HF_LMA_MASK) {
2246 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2248 #endif
2249 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2250 if (tss_selector & 4) {
2251 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2253 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2254 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2256 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2257 /* NOTE: we check both segment and busy TSS */
2258 if (type != 3) {
2259 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2261 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2262 } else {
2263 helper_ret_protected(env, shift, 1, 0, GETPC());
2264 }
2265 env->hflags2 &= ~HF2_NMI_MASK;
2266 }
2268 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2269 {
2270 helper_ret_protected(env, shift, 0, addend, GETPC());
2271 }
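/*
 * helper_sysenter: fast system-call entry.  VM, IF and RF are cleared,
 * CS and SS are loaded as flat ring-0 segments derived from the
 * IA32_SYSENTER_CS MSR (SS is CS + 8), and ESP/EIP come from the
 * SYSENTER_ESP/EIP MSRs; a zero SYSENTER_CS raises #GP(0).  In long
 * mode the code segment additionally has the L bit set.
 */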
2273 void helper_sysenter(CPUX86State *env)
2274 {
2275 if (env->sysenter_cs == 0) {
2276 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2278 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2280 #ifdef TARGET_X86_64
2281 if (env->hflags & HF_LMA_MASK) {
2282 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2283 0, 0xffffffff,
2284 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2285 DESC_S_MASK |
2286 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2287 DESC_L_MASK);
2288 } else
2289 #endif
2290 {
2291 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2292 0, 0xffffffff,
2293 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2294 DESC_S_MASK |
2295 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2296 }
2297 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2298 0, 0xffffffff,
2299 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2300 DESC_S_MASK |
2301 DESC_W_MASK | DESC_A_MASK);
2302 env->regs[R_ESP] = env->sysenter_esp;
2303 env->eip = env->sysenter_eip;
2304 }
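/*
 * helper_sysexit: fast return to user mode.  It requires CPL 0 and a
 * non-zero SYSENTER_CS, otherwise #GP(0).  The ring-3 CS/SS selectors
 * are SYSENTER_CS + 16/+24 (32-bit) or +32/+40 (64-bit, dflag == 2)
 * with RPL forced to 3; the new stack pointer comes from ECX and the
 * return address from EDX.
 */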
2306 void helper_sysexit(CPUX86State *env, int dflag)
2307 {
2308 int cpl;
2310 cpl = env->hflags & HF_CPL_MASK;
2311 if (env->sysenter_cs == 0 || cpl != 0) {
2312 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2314 #ifdef TARGET_X86_64
2315 if (dflag == 2) {
2316 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2317 3, 0, 0xffffffff,
2318 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2319 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2320 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2321 DESC_L_MASK);
2322 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2323 3, 0, 0xffffffff,
2324 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2325 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2326 DESC_W_MASK | DESC_A_MASK);
2327 } else
2328 #endif
2329 {
2330 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2331 3, 0, 0xffffffff,
2332 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2333 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2334 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2335 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2336 3, 0, 0xffffffff,
2337 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2338 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2339 DESC_W_MASK | DESC_A_MASK);
2340 }
2341 env->regs[R_ESP] = env->regs[R_ECX];
2342 env->eip = env->regs[R_EDX];
2343 }
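/*
 * helper_lsl implements LSL: it returns the (possibly page-granular)
 * limit of the descriptor named by the selector and reports success
 * through ZF in CC_SRC.  Non-conforming segments and the listed system
 * types are only visible when DPL >= max(CPL, RPL); on any failure ZF
 * is cleared and 0 is returned, without raising an exception.
 */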
2345 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2346 {
2347 unsigned int limit;
2348 uint32_t e1, e2, eflags, selector;
2349 int rpl, dpl, cpl, type;
2351 selector = selector1 & 0xffff;
2352 eflags = cpu_cc_compute_all(env, CC_OP);
2353 if ((selector & 0xfffc) == 0) {
2354 goto fail;
2356 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2357 goto fail;
2359 rpl = selector & 3;
2360 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2361 cpl = env->hflags & HF_CPL_MASK;
2362 if (e2 & DESC_S_MASK) {
2363 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2364 /* conforming */
2365 } else {
2366 if (dpl < cpl || dpl < rpl) {
2367 goto fail;
2370 } else {
2371 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2372 switch (type) {
2373 case 1:
2374 case 2:
2375 case 3:
2376 case 9:
2377 case 11:
2378 break;
2379 default:
2380 goto fail;
2382 if (dpl < cpl || dpl < rpl) {
2383 fail:
2384 CC_SRC = eflags & ~CC_Z;
2385 return 0;
2386 }
2387 }
2388 limit = get_seg_limit(e1, e2);
2389 CC_SRC = eflags | CC_Z;
2390 return limit;
2391 }
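/*
 * helper_lar implements LAR: on success it sets ZF and returns the
 * access-rights image of the descriptor (e2 masked with 0x00f0ff00,
 * i.e. type/DPL/P plus the AVL/L/D-B/G bits).  Compared with LSL it
 * also accepts call gates and task gates (types 4, 5 and 12).
 */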
2393 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2394 {
2395 uint32_t e1, e2, eflags, selector;
2396 int rpl, dpl, cpl, type;
2398 selector = selector1 & 0xffff;
2399 eflags = cpu_cc_compute_all(env, CC_OP);
2400 if ((selector & 0xfffc) == 0) {
2401 goto fail;
2403 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2404 goto fail;
2406 rpl = selector & 3;
2407 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2408 cpl = env->hflags & HF_CPL_MASK;
2409 if (e2 & DESC_S_MASK) {
2410 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2411 /* conforming */
2412 } else {
2413 if (dpl < cpl || dpl < rpl) {
2414 goto fail;
2417 } else {
2418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2419 switch (type) {
2420 case 1:
2421 case 2:
2422 case 3:
2423 case 4:
2424 case 5:
2425 case 9:
2426 case 11:
2427 case 12:
2428 break;
2429 default:
2430 goto fail;
2432 if (dpl < cpl || dpl < rpl) {
2433 fail:
2434 CC_SRC = eflags & ~CC_Z;
2435 return 0;
2436 }
2437 }
2438 CC_SRC = eflags | CC_Z;
2439 return e2 & 0x00f0ff00;
2440 }
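/*
 * helper_verr / helper_verw implement VERR and VERW: ZF is set in
 * CC_SRC if the segment selected by selector1 would be readable
 * (respectively writable) at the current CPL/RPL, and cleared
 * otherwise.  No exception is raised on failure.
 */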
2442 void helper_verr(CPUX86State *env, target_ulong selector1)
2443 {
2444 uint32_t e1, e2, eflags, selector;
2445 int rpl, dpl, cpl;
2447 selector = selector1 & 0xffff;
2448 eflags = cpu_cc_compute_all(env, CC_OP);
2449 if ((selector & 0xfffc) == 0) {
2450 goto fail;
2452 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2453 goto fail;
2455 if (!(e2 & DESC_S_MASK)) {
2456 goto fail;
2458 rpl = selector & 3;
2459 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2460 cpl = env->hflags & HF_CPL_MASK;
2461 if (e2 & DESC_CS_MASK) {
2462 if (!(e2 & DESC_R_MASK)) {
2463 goto fail;
2465 if (!(e2 & DESC_C_MASK)) {
2466 if (dpl < cpl || dpl < rpl) {
2467 goto fail;
2470 } else {
2471 if (dpl < cpl || dpl < rpl) {
2472 fail:
2473 CC_SRC = eflags & ~CC_Z;
2474 return;
2475 }
2476 }
2477 CC_SRC = eflags | CC_Z;
2478 }
2480 void helper_verw(CPUX86State *env, target_ulong selector1)
2481 {
2482 uint32_t e1, e2, eflags, selector;
2483 int rpl, dpl, cpl;
2485 selector = selector1 & 0xffff;
2486 eflags = cpu_cc_compute_all(env, CC_OP);
2487 if ((selector & 0xfffc) == 0) {
2488 goto fail;
2490 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2491 goto fail;
2493 if (!(e2 & DESC_S_MASK)) {
2494 goto fail;
2496 rpl = selector & 3;
2497 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2498 cpl = env->hflags & HF_CPL_MASK;
2499 if (e2 & DESC_CS_MASK) {
2500 goto fail;
2501 } else {
2502 if (dpl < cpl || dpl < rpl) {
2503 goto fail;
2505 if (!(e2 & DESC_W_MASK)) {
2506 fail:
2507 CC_SRC = eflags & ~CC_Z;
2508 return;
2509 }
2510 }
2511 CC_SRC = eflags | CC_Z;
2512 }
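/*
 * For user-mode emulation only: segment loads done while the guest is
 * in real or vm86 mode bypass the descriptor tables and simply build a
 * 64 KiB segment-cache entry with base = selector << 4; protected-mode
 * loads go through helper_load_seg as usual.
 */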
2514 #if defined(CONFIG_USER_ONLY)
2515 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2516 {
2517 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2518 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2519 selector &= 0xffff;
2520 cpu_x86_load_seg_cache(env, seg_reg, selector,
2521 (selector << 4), 0xffff,
2522 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2523 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2524 } else {
2525 helper_load_seg(env, seg_reg, selector);
2526 }
2527 }
2528 #endif
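/*
 * check_io consults the I/O permission bitmap of the current 32-bit
 * TSS: the bitmap offset is read from the TSS word at 0x66, two bytes
 * are fetched so that an access spanning a byte boundary is covered,
 * and every bit for the "size" ports starting at "addr" must be clear,
 * otherwise #GP(0) is raised.
 */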
2530 /* check if Port I/O is allowed in TSS */
2531 static inline void check_io(CPUX86State *env, int addr, int size,
2532 uintptr_t retaddr)
2533 {
2534 int io_offset, val, mask;
2536 /* TSS must be a valid 32 bit one */
2537 if (!(env->tr.flags & DESC_P_MASK) ||
2538 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2539 env->tr.limit < 103) {
2540 goto fail;
2542 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2543 io_offset += (addr >> 3);
2544 /* Note: the check needs two bytes */
2545 if ((io_offset + 1) > env->tr.limit) {
2546 goto fail;
2548 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2549 val >>= (addr & 7);
2550 mask = (1 << size) - 1;
2551 /* all bits must be zero to allow the I/O */
2552 if ((val & mask) != 0) {
2553 fail:
2554 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2555 }
2556 }
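/*
 * helper_check_iob/iow/iol are the byte/word/long entry points used by
 * the translated I/O instructions; they only differ in the access
 * width passed to check_io.
 */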
2558 void helper_check_iob(CPUX86State *env, uint32_t t0)
2559 {
2560 check_io(env, t0, 1, GETPC());
2561 }
2563 void helper_check_iow(CPUX86State *env, uint32_t t0)
2564 {
2565 check_io(env, t0, 2, GETPC());
2566 }
2568 void helper_check_iol(CPUX86State *env, uint32_t t0)
2569 {
2570 check_io(env, t0, 4, GETPC());
2571 }