/* qemu/ar7.git: target-i386/seg_helper.c */
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
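/*
 * For reference, e1/e2 are the two 32-bit words of a segment descriptor:
 *   e1[15:0]  limit 15..0          e1[31:16] base 15..0
 *   e2[7:0]   base 23..16          e2[11:8]  type
 *   e2[12]    S (code/data)        e2[14:13] DPL
 *   e2[15]    P (present)          e2[19:16] limit 19..16
 *   e2[21]    L    e2[22] D/B      e2[23]    G (4K granularity)
 *   e2[31:24] base 31..24
 * get_seg_base() and get_seg_limit() above simply reassemble these fields;
 * with G set, the limit is scaled to 4K pages ((limit << 12) | 0xfff).
 */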
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
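/*
 * The (dpl * 4 + 2) << shift index above follows the TSS layout: a 32-bit
 * TSS keeps the {ESP, SS} pair for privilege level n at offset 4 + 8 * n
 * (ESP0 at 0x04, SS0 at 0x08, ...), while a 16-bit TSS keeps {SP, SS} at
 * offset 2 + 4 * n.
 */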
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
232 #define SWITCH_TSS_JMP 0
233 #define SWITCH_TSS_IRET 1
234 #define SWITCH_TSS_CALL 2
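/*
 * The source argument tells switch_tss_ra() how to handle the busy bit and
 * NT: JMP and IRET clear the busy bit of the outgoing TSS (CALL leaves it
 * set), JMP and CALL mark the incoming TSS busy, CALL also stores the old
 * TR selector as the back link and sets NT in the new EFLAGS, and IRET
 * clears NT in the flags written back to the outgoing TSS.
 */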
236 /* XXX: restore CPU state in registers (PowerPC case) */
237 static void switch_tss_ra(CPUX86State *env, int tss_selector,
238 uint32_t e1, uint32_t e2, int source,
239 uint32_t next_eip, uintptr_t retaddr)
241 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
242 target_ulong tss_base;
243 uint32_t new_regs[8], new_segs[6];
244 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
245 uint32_t old_eflags, eflags_mask;
246 SegmentCache *dt;
247 int index;
248 target_ulong ptr;
250 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
251 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
252 source);
254 /* if task gate, we read the TSS segment and we load it */
255 if (type == 5) {
256 if (!(e2 & DESC_P_MASK)) {
257 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
259 tss_selector = e1 >> 16;
260 if (tss_selector & 4) {
261 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
263 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
264 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
266 if (e2 & DESC_S_MASK) {
267 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
269 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
270 if ((type & 7) != 1) {
271 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
275 if (!(e2 & DESC_P_MASK)) {
276 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
279 if (type & 8) {
280 tss_limit_max = 103;
281 } else {
282 tss_limit_max = 43;
284 tss_limit = get_seg_limit(e1, e2);
285 tss_base = get_seg_base(e1, e2);
286 if ((tss_selector & 4) != 0 ||
287 tss_limit < tss_limit_max) {
288 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
290 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
291 if (old_type & 8) {
292 old_tss_limit_max = 103;
293 } else {
294 old_tss_limit_max = 43;
297 /* read all the registers from the new TSS */
298 if (type & 8) {
299 /* 32 bit */
300 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
301 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
302 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
303 for (i = 0; i < 8; i++) {
304 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
305 retaddr);
307 for (i = 0; i < 6; i++) {
308 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
309 retaddr);
311 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
312 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
313 } else {
314 /* 16 bit */
315 new_cr3 = 0;
316 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
317 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
318 for (i = 0; i < 8; i++) {
319 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
320 retaddr) | 0xffff0000;
322 for (i = 0; i < 4; i++) {
323 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
324 retaddr);
326 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
327 new_segs[R_FS] = 0;
328 new_segs[R_GS] = 0;
329 new_trap = 0;
331 /* XXX: avoid a compiler warning, see
332 http://support.amd.com/us/Processor_TechDocs/24593.pdf
333 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
334 (void)new_trap;
336 /* NOTE: we must avoid memory exceptions during the task switch,
337 so we make dummy accesses before */
338 /* XXX: it can still fail in some cases, so a bigger hack is
339 necessary to validate the TLB after having done the accesses */
341 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
342 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
344 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
346 /* clear busy bit (it is restartable) */
347 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
348 target_ulong ptr;
349 uint32_t e2;
351 ptr = env->gdt.base + (env->tr.selector & ~7);
352 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
353 e2 &= ~DESC_TSS_BUSY_MASK;
354 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
356 old_eflags = cpu_compute_eflags(env);
357 if (source == SWITCH_TSS_IRET) {
358 old_eflags &= ~NT_MASK;
361 /* save the current state in the old TSS */
362 if (type & 8) {
363 /* 32 bit */
364 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
374 for (i = 0; i < 6; i++) {
375 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
376 env->segs[i].selector, retaddr);
378 } else {
379 /* 16 bit */
380 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
390 for (i = 0; i < 4; i++) {
391 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
392 env->segs[i].selector, retaddr);
396 /* now if an exception occurs, it will occur in the next task
397 context */
399 if (source == SWITCH_TSS_CALL) {
400 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
401 new_eflags |= NT_MASK;
404 /* set busy bit */
405 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
406 target_ulong ptr;
407 uint32_t e2;
409 ptr = env->gdt.base + (tss_selector & ~7);
410 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
411 e2 |= DESC_TSS_BUSY_MASK;
412 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
415 /* set the new CPU state */
416 /* from this point, any exception which occurs can give problems */
417 env->cr[0] |= CR0_TS_MASK;
418 env->hflags |= HF_TS_MASK;
419 env->tr.selector = tss_selector;
420 env->tr.base = tss_base;
421 env->tr.limit = tss_limit;
422 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
424 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
425 cpu_x86_update_cr3(env, new_cr3);
428 /* load all registers without an exception, then reload them with
429 possible exception */
430 env->eip = new_eip;
431 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
432 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
433 if (!(type & 8)) {
434 eflags_mask &= 0xffff;
436 cpu_load_eflags(env, new_eflags, eflags_mask);
437 /* XXX: what to do in 16 bit case? */
438 env->regs[R_EAX] = new_regs[0];
439 env->regs[R_ECX] = new_regs[1];
440 env->regs[R_EDX] = new_regs[2];
441 env->regs[R_EBX] = new_regs[3];
442 env->regs[R_ESP] = new_regs[4];
443 env->regs[R_EBP] = new_regs[5];
444 env->regs[R_ESI] = new_regs[6];
445 env->regs[R_EDI] = new_regs[7];
446 if (new_eflags & VM_MASK) {
447 for (i = 0; i < 6; i++) {
448 load_seg_vm(env, i, new_segs[i]);
450 } else {
451 /* first just selectors as the rest may trigger exceptions */
452 for (i = 0; i < 6; i++) {
453 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
457 env->ldt.selector = new_ldt & ~4;
458 env->ldt.base = 0;
459 env->ldt.limit = 0;
460 env->ldt.flags = 0;
462 /* load the LDT */
463 if (new_ldt & 4) {
464 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
467 if ((new_ldt & 0xfffc) != 0) {
468 dt = &env->gdt;
469 index = new_ldt & ~7;
470 if ((index + 7) > dt->limit) {
471 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
473 ptr = dt->base + index;
474 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
475 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
476 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
477 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
479 if (!(e2 & DESC_P_MASK)) {
480 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
482 load_seg_cache_raw_dt(&env->ldt, e1, e2);
485 /* load the segments */
486 if (!(new_eflags & VM_MASK)) {
487 int cpl = new_segs[R_CS] & 3;
488 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
489 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
490 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
491 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
492 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
493 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
496 /* check that env->eip is in the CS segment limits */
497 if (new_eip > env->segs[R_CS].limit) {
498 /* XXX: different exception if CALL? */
499 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
502 #ifndef CONFIG_USER_ONLY
503 /* reset local breakpoints */
504 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
505 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
507 #endif
510 static void switch_tss(CPUX86State *env, int tss_selector,
511 uint32_t e1, uint32_t e2, int source,
512 uint32_t next_eip)
514 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
517 static inline unsigned int get_sp_mask(unsigned int e2)
519 if (e2 & DESC_B_MASK) {
520 return 0xffffffff;
521 } else {
522 return 0xffff;
526 static int exception_has_error_code(int intno)
528 switch (intno) {
529 case 8:
530 case 10:
531 case 11:
532 case 12:
533 case 13:
534 case 14:
535 case 17:
536 return 1;
538 return 0;
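/* The vectors above are #DF(8), #TS(10), #NP(11), #SS(12), #GP(13),
   #PF(14) and #AC(17): the exceptions that push an error code. */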
541 #ifdef TARGET_X86_64
542 #define SET_ESP(val, sp_mask) \
543 do { \
544 if ((sp_mask) == 0xffff) { \
545 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
546 ((val) & 0xffff); \
547 } else if ((sp_mask) == 0xffffffffLL) { \
548 env->regs[R_ESP] = (uint32_t)(val); \
549 } else { \
550 env->regs[R_ESP] = (val); \
552 } while (0)
553 #else
554 #define SET_ESP(val, sp_mask) \
555 do { \
556 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
557 ((val) & (sp_mask)); \
558 } while (0)
559 #endif
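/*
 * SET_ESP writes back only the part of the stack pointer selected by
 * sp_mask: a 16-bit stack segment (B bit clear) must leave the upper bits
 * of ESP untouched, a 32-bit stack stores ESP as a zero-extended 32-bit
 * value, and the full 64-bit RSP is written otherwise.
 */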
561 /* in 64-bit machines, this can overflow. So this segment addition macro
562 * can be used to trim the value to 32-bit whenever needed */
563 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
565 /* XXX: add a is_user flag to have proper security support */
566 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
568 sp -= 2; \
569 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
572 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
574 sp -= 4; \
575 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
578 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
580 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
581 sp += 2; \
584 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
586 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
587 sp += 4; \
590 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
591 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
592 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
593 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
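/*
 * Note that the PUSHW/PUSHL/POPW/POPL macros operate on a local copy of the
 * stack pointer: callers build the whole frame with them and only commit
 * the result with SET_ESP at the end, so a fault raised while pushing
 * leaves the guest's ESP unchanged and the instruction restartable.
 */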
595 /* protected mode interrupt */
596 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
597 int error_code, unsigned int next_eip,
598 int is_hw)
600 SegmentCache *dt;
601 target_ulong ptr, ssp;
602 int type, dpl, selector, ss_dpl, cpl;
603 int has_error_code, new_stack, shift;
604 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
605 uint32_t old_eip, sp_mask;
606 int vm86 = env->eflags & VM_MASK;
608 has_error_code = 0;
609 if (!is_int && !is_hw) {
610 has_error_code = exception_has_error_code(intno);
612 if (is_int) {
613 old_eip = next_eip;
614 } else {
615 old_eip = env->eip;
618 dt = &env->idt;
619 if (intno * 8 + 7 > dt->limit) {
620 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
622 ptr = dt->base + intno * 8;
623 e1 = cpu_ldl_kernel(env, ptr);
624 e2 = cpu_ldl_kernel(env, ptr + 4);
625 /* check gate type */
626 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
627 switch (type) {
628 case 5: /* task gate */
629 /* must do that check here to return the correct error code */
630 if (!(e2 & DESC_P_MASK)) {
631 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
633 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
634 if (has_error_code) {
635 int type;
636 uint32_t mask;
638 /* push the error code */
639 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
640 shift = type >> 3;
641 if (env->segs[R_SS].flags & DESC_B_MASK) {
642 mask = 0xffffffff;
643 } else {
644 mask = 0xffff;
646 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
647 ssp = env->segs[R_SS].base + esp;
648 if (shift) {
649 cpu_stl_kernel(env, ssp, error_code);
650 } else {
651 cpu_stw_kernel(env, ssp, error_code);
653 SET_ESP(esp, mask);
655 return;
656 case 6: /* 286 interrupt gate */
657 case 7: /* 286 trap gate */
658 case 14: /* 386 interrupt gate */
659 case 15: /* 386 trap gate */
660 break;
661 default:
662 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
663 break;
665 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
666 cpl = env->hflags & HF_CPL_MASK;
667 /* check privilege if software int */
668 if (is_int && dpl < cpl) {
669 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
671 /* check valid bit */
672 if (!(e2 & DESC_P_MASK)) {
673 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
675 selector = e1 >> 16;
676 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
677 if ((selector & 0xfffc) == 0) {
678 raise_exception_err(env, EXCP0D_GPF, 0);
680 if (load_segment(env, &e1, &e2, selector) != 0) {
681 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
683 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
684 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
686 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
687 if (dpl > cpl) {
688 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
690 if (!(e2 & DESC_P_MASK)) {
691 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
693 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
694 /* to inner privilege */
695 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
696 if ((ss & 0xfffc) == 0) {
697 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
699 if ((ss & 3) != dpl) {
700 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
702 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
703 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
705 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
706 if (ss_dpl != dpl) {
707 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
709 if (!(ss_e2 & DESC_S_MASK) ||
710 (ss_e2 & DESC_CS_MASK) ||
711 !(ss_e2 & DESC_W_MASK)) {
712 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
714 if (!(ss_e2 & DESC_P_MASK)) {
715 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
717 new_stack = 1;
718 sp_mask = get_sp_mask(ss_e2);
719 ssp = get_seg_base(ss_e1, ss_e2);
720 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
721 /* to same privilege */
722 if (vm86) {
723 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
725 new_stack = 0;
726 sp_mask = get_sp_mask(env->segs[R_SS].flags);
727 ssp = env->segs[R_SS].base;
728 esp = env->regs[R_ESP];
729 dpl = cpl;
730 } else {
731 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
732 new_stack = 0; /* avoid warning */
733 sp_mask = 0; /* avoid warning */
734 ssp = 0; /* avoid warning */
735 esp = 0; /* avoid warning */
738 shift = type >> 3;
740 #if 0
741 /* XXX: check that enough room is available */
742 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
743 if (vm86) {
744 push_size += 8;
746 push_size <<= shift;
747 #endif
748 if (shift == 1) {
749 if (new_stack) {
750 if (vm86) {
751 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
752 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
753 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
756 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
757 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
759 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
760 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
761 PUSHL(ssp, esp, sp_mask, old_eip);
762 if (has_error_code) {
763 PUSHL(ssp, esp, sp_mask, error_code);
765 } else {
766 if (new_stack) {
767 if (vm86) {
768 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
769 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
770 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
773 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
774 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
776 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
777 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
778 PUSHW(ssp, esp, sp_mask, old_eip);
779 if (has_error_code) {
780 PUSHW(ssp, esp, sp_mask, error_code);
784 /* an interrupt gate clears the IF flag (a trap gate does not) */
785 if ((type & 1) == 0) {
786 env->eflags &= ~IF_MASK;
788 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
790 if (new_stack) {
791 if (vm86) {
792 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
793 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
797 ss = (ss & ~3) | dpl;
798 cpu_x86_load_seg_cache(env, R_SS, ss,
799 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
801 SET_ESP(esp, sp_mask);
803 selector = (selector & ~3) | dpl;
804 cpu_x86_load_seg_cache(env, R_CS, selector,
805 get_seg_base(e1, e2),
806 get_seg_limit(e1, e2),
807 e2);
808 env->eip = offset;
811 #ifdef TARGET_X86_64
813 #define PUSHQ_RA(sp, val, ra) \
815 sp -= 8; \
816 cpu_stq_kernel_ra(env, sp, (val), ra); \
819 #define POPQ_RA(sp, val, ra) \
821 val = cpu_ldq_kernel_ra(env, sp, ra); \
822 sp += 8; \
825 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
826 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
828 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
830 X86CPU *cpu = x86_env_get_cpu(env);
831 int index;
833 #if 0
834 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
835 env->tr.base, env->tr.limit);
836 #endif
838 if (!(env->tr.flags & DESC_P_MASK)) {
839 cpu_abort(CPU(cpu), "invalid tss");
841 index = 8 * level + 4;
842 if ((index + 7) > env->tr.limit) {
843 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
845 return cpu_ldq_kernel(env, env->tr.base + index);
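/*
 * The 8 * level + 4 index matches the 64-bit TSS layout: RSP0/RSP1/RSP2
 * are stored at offsets 4, 12 and 20 and IST1..IST7 follow from offset 36,
 * which is why do_interrupt64() passes ist + 3 when an IST entry is wanted.
 */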
848 /* 64 bit interrupt */
849 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
850 int error_code, target_ulong next_eip, int is_hw)
852 SegmentCache *dt;
853 target_ulong ptr;
854 int type, dpl, selector, cpl, ist;
855 int has_error_code, new_stack;
856 uint32_t e1, e2, e3, ss;
857 target_ulong old_eip, esp, offset;
859 has_error_code = 0;
860 if (!is_int && !is_hw) {
861 has_error_code = exception_has_error_code(intno);
863 if (is_int) {
864 old_eip = next_eip;
865 } else {
866 old_eip = env->eip;
869 dt = &env->idt;
870 if (intno * 16 + 15 > dt->limit) {
871 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
873 ptr = dt->base + intno * 16;
874 e1 = cpu_ldl_kernel(env, ptr);
875 e2 = cpu_ldl_kernel(env, ptr + 4);
876 e3 = cpu_ldl_kernel(env, ptr + 8);
877 /* check gate type */
878 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
879 switch (type) {
880 case 14: /* 386 interrupt gate */
881 case 15: /* 386 trap gate */
882 break;
883 default:
884 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
885 break;
887 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
888 cpl = env->hflags & HF_CPL_MASK;
889 /* check privilege if software int */
890 if (is_int && dpl < cpl) {
891 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
893 /* check valid bit */
894 if (!(e2 & DESC_P_MASK)) {
895 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
897 selector = e1 >> 16;
898 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
899 ist = e2 & 7;
900 if ((selector & 0xfffc) == 0) {
901 raise_exception_err(env, EXCP0D_GPF, 0);
904 if (load_segment(env, &e1, &e2, selector) != 0) {
905 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
907 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
908 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
911 if (dpl > cpl) {
912 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
914 if (!(e2 & DESC_P_MASK)) {
915 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
917 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
918 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
920 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
921 /* to inner privilege */
922 new_stack = 1;
923 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
924 ss = 0;
925 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
926 /* to same privilege */
927 if (env->eflags & VM_MASK) {
928 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
930 new_stack = 0;
931 esp = env->regs[R_ESP];
932 dpl = cpl;
933 } else {
934 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
935 new_stack = 0; /* avoid warning */
936 esp = 0; /* avoid warning */
938 esp &= ~0xfLL; /* align stack */
940 PUSHQ(esp, env->segs[R_SS].selector);
941 PUSHQ(esp, env->regs[R_ESP]);
942 PUSHQ(esp, cpu_compute_eflags(env));
943 PUSHQ(esp, env->segs[R_CS].selector);
944 PUSHQ(esp, old_eip);
945 if (has_error_code) {
946 PUSHQ(esp, error_code);
949 /* an interrupt gate clears the IF flag (a trap gate does not) */
950 if ((type & 1) == 0) {
951 env->eflags &= ~IF_MASK;
953 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
955 if (new_stack) {
956 ss = 0 | dpl;
957 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
959 env->regs[R_ESP] = esp;
961 selector = (selector & ~3) | dpl;
962 cpu_x86_load_seg_cache(env, R_CS, selector,
963 get_seg_base(e1, e2),
964 get_seg_limit(e1, e2),
965 e2);
966 env->eip = offset;
968 #endif
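/*
 * The frame built above is the fixed long-mode interrupt layout: SS, RSP,
 * RFLAGS, CS and RIP are always pushed (plus an error code for vectors
 * that define one), after aligning the stack down to a 16-byte boundary.
 */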
970 #ifdef TARGET_X86_64
971 #if defined(CONFIG_USER_ONLY)
972 void helper_syscall(CPUX86State *env, int next_eip_addend)
974 CPUState *cs = CPU(x86_env_get_cpu(env));
976 cs->exception_index = EXCP_SYSCALL;
977 env->exception_next_eip = env->eip + next_eip_addend;
978 cpu_loop_exit(cs);
980 #else
981 void helper_syscall(CPUX86State *env, int next_eip_addend)
983 int selector;
985 if (!(env->efer & MSR_EFER_SCE)) {
986 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
988 selector = (env->star >> 32) & 0xffff;
989 if (env->hflags & HF_LMA_MASK) {
990 int code64;
992 env->regs[R_ECX] = env->eip + next_eip_addend;
993 env->regs[11] = cpu_compute_eflags(env);
995 code64 = env->hflags & HF_CS64_MASK;
997 env->eflags &= ~env->fmask;
998 cpu_load_eflags(env, env->eflags, 0);
999 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1000 0, 0xffffffff,
1001 DESC_G_MASK | DESC_P_MASK |
1002 DESC_S_MASK |
1003 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1004 DESC_L_MASK);
1005 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1006 0, 0xffffffff,
1007 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1008 DESC_S_MASK |
1009 DESC_W_MASK | DESC_A_MASK);
1010 if (code64) {
1011 env->eip = env->lstar;
1012 } else {
1013 env->eip = env->cstar;
1015 } else {
1016 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1018 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1019 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1020 0, 0xffffffff,
1021 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1022 DESC_S_MASK |
1023 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1024 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1025 0, 0xffffffff,
1026 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1027 DESC_S_MASK |
1028 DESC_W_MASK | DESC_A_MASK);
1029 env->eip = (uint32_t)env->star;
1032 #endif
1033 #endif
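/*
 * SYSCALL takes the new CS selector from MSR_STAR[47:32] (SS is that
 * selector + 8), the target RIP from LSTAR in 64-bit mode or CSTAR in
 * compatibility mode, and masks RFLAGS with SFMASK while saving the return
 * RIP in RCX and the old RFLAGS in R11; without LMA it jumps to the low 32
 * bits of STAR with the return EIP in ECX.
 */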
1035 #ifdef TARGET_X86_64
1036 void helper_sysret(CPUX86State *env, int dflag)
1038 int cpl, selector;
1040 if (!(env->efer & MSR_EFER_SCE)) {
1041 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1043 cpl = env->hflags & HF_CPL_MASK;
1044 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1045 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1047 selector = (env->star >> 48) & 0xffff;
1048 if (env->hflags & HF_LMA_MASK) {
1049 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1050 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1051 NT_MASK);
1052 if (dflag == 2) {
1053 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1054 0, 0xffffffff,
1055 DESC_G_MASK | DESC_P_MASK |
1056 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1057 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1058 DESC_L_MASK);
1059 env->eip = env->regs[R_ECX];
1060 } else {
1061 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1062 0, 0xffffffff,
1063 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1064 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1065 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1066 env->eip = (uint32_t)env->regs[R_ECX];
1068 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1069 0, 0xffffffff,
1070 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1071 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1072 DESC_W_MASK | DESC_A_MASK);
1073 } else {
1074 env->eflags |= IF_MASK;
1075 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1076 0, 0xffffffff,
1077 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1078 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1079 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1080 env->eip = (uint32_t)env->regs[R_ECX];
1081 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1082 0, 0xffffffff,
1083 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1084 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1085 DESC_W_MASK | DESC_A_MASK);
1088 #endif
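/*
 * SYSRET mirrors SYSCALL: CS comes from MSR_STAR[63:48] with RPL forced to
 * 3 (+16 and with the L bit set for a 64-bit return, as chosen by dflag),
 * SS is that selector + 8, RIP is restored from RCX and RFLAGS from R11 in
 * long mode; outside long mode only IF is forced on and execution resumes
 * at the 32-bit value of ECX.
 */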
1090 /* real mode interrupt */
1091 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1092 int error_code, unsigned int next_eip)
1094 SegmentCache *dt;
1095 target_ulong ptr, ssp;
1096 int selector;
1097 uint32_t offset, esp;
1098 uint32_t old_cs, old_eip;
1100 /* real mode (simpler!) */
1101 dt = &env->idt;
1102 if (intno * 4 + 3 > dt->limit) {
1103 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1105 ptr = dt->base + intno * 4;
1106 offset = cpu_lduw_kernel(env, ptr);
1107 selector = cpu_lduw_kernel(env, ptr + 2);
1108 esp = env->regs[R_ESP];
1109 ssp = env->segs[R_SS].base;
1110 if (is_int) {
1111 old_eip = next_eip;
1112 } else {
1113 old_eip = env->eip;
1115 old_cs = env->segs[R_CS].selector;
1116 /* XXX: use SS segment size? */
1117 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1118 PUSHW(ssp, esp, 0xffff, old_cs);
1119 PUSHW(ssp, esp, 0xffff, old_eip);
1121 /* update processor state */
1122 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1123 env->eip = offset;
1124 env->segs[R_CS].selector = selector;
1125 env->segs[R_CS].base = (selector << 4);
1126 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
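/*
 * In real mode the frame is just FLAGS, CS and IP pushed as three 16-bit
 * words, and the vector table entry at intno * 4 holds the 16-bit offset
 * followed by the segment; no privilege or presence checks are performed.
 */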
1129 #if defined(CONFIG_USER_ONLY)
1130 /* fake user mode interrupt */
1131 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1132 int error_code, target_ulong next_eip)
1134 SegmentCache *dt;
1135 target_ulong ptr;
1136 int dpl, cpl, shift;
1137 uint32_t e2;
1139 dt = &env->idt;
1140 if (env->hflags & HF_LMA_MASK) {
1141 shift = 4;
1142 } else {
1143 shift = 3;
1145 ptr = dt->base + (intno << shift);
1146 e2 = cpu_ldl_kernel(env, ptr + 4);
1148 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1149 cpl = env->hflags & HF_CPL_MASK;
1150 /* check privilege if software int */
1151 if (is_int && dpl < cpl) {
1152 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1155 /* Since we emulate only user space, we cannot do more than exit
1156 the emulation with a suitable exception and error code, so we
1157 just update EIP for INT 0x80 and EXCP_SYSCALL. */
1158 if (is_int || intno == EXCP_SYSCALL) {
1159 env->eip = next_eip;
1163 #else
1165 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1166 int error_code, int is_hw, int rm)
1168 CPUState *cs = CPU(x86_env_get_cpu(env));
1169 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1170 control.event_inj));
1172 if (!(event_inj & SVM_EVTINJ_VALID)) {
1173 int type;
1175 if (is_int) {
1176 type = SVM_EVTINJ_TYPE_SOFT;
1177 } else {
1178 type = SVM_EVTINJ_TYPE_EXEPT;
1180 event_inj = intno | type | SVM_EVTINJ_VALID;
1181 if (!rm && exception_has_error_code(intno)) {
1182 event_inj |= SVM_EVTINJ_VALID_ERR;
1183 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1184 control.event_inj_err),
1185 error_code);
1187 x86_stl_phys(cs,
1188 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1189 event_inj);
1192 #endif
1194 /*
1195 * Begin execution of an interrupt. is_int is TRUE if coming from
1196 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1197 * instruction. It is only relevant if is_int is TRUE.
1198 */
1199 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1200 int error_code, target_ulong next_eip, int is_hw)
1202 CPUX86State *env = &cpu->env;
1204 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1205 if ((env->cr[0] & CR0_PE_MASK)) {
1206 static int count;
1208 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1209 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1210 count, intno, error_code, is_int,
1211 env->hflags & HF_CPL_MASK,
1212 env->segs[R_CS].selector, env->eip,
1213 (int)env->segs[R_CS].base + env->eip,
1214 env->segs[R_SS].selector, env->regs[R_ESP]);
1215 if (intno == 0x0e) {
1216 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1217 } else {
1218 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1220 qemu_log("\n");
1221 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1222 #if 0
1224 int i;
1225 target_ulong ptr;
1227 qemu_log(" code=");
1228 ptr = env->segs[R_CS].base + env->eip;
1229 for (i = 0; i < 16; i++) {
1230 qemu_log(" %02x", ldub(ptr + i));
1232 qemu_log("\n");
1234 #endif
1235 count++;
1238 if (env->cr[0] & CR0_PE_MASK) {
1239 #if !defined(CONFIG_USER_ONLY)
1240 if (env->hflags & HF_SVMI_MASK) {
1241 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1243 #endif
1244 #ifdef TARGET_X86_64
1245 if (env->hflags & HF_LMA_MASK) {
1246 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1247 } else
1248 #endif
1250 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1251 is_hw);
1253 } else {
1254 #if !defined(CONFIG_USER_ONLY)
1255 if (env->hflags & HF_SVMI_MASK) {
1256 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1258 #endif
1259 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1262 #if !defined(CONFIG_USER_ONLY)
1263 if (env->hflags & HF_SVMI_MASK) {
1264 CPUState *cs = CPU(cpu);
1265 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1266 offsetof(struct vmcb,
1267 control.event_inj));
1269 x86_stl_phys(cs,
1270 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1271 event_inj & ~SVM_EVTINJ_VALID);
1273 #endif
1276 void x86_cpu_do_interrupt(CPUState *cs)
1278 X86CPU *cpu = X86_CPU(cs);
1279 CPUX86State *env = &cpu->env;
1281 #if defined(CONFIG_USER_ONLY)
1282 /* if user mode only, we simulate a fake exception
1283 which will be handled outside the cpu execution
1284 loop */
1285 do_interrupt_user(env, cs->exception_index,
1286 env->exception_is_int,
1287 env->error_code,
1288 env->exception_next_eip);
1289 /* successfully delivered */
1290 env->old_exception = -1;
1291 #else
1292 /* simulate a real cpu exception. On i386, it can
1293 trigger new exceptions, but we do not handle
1294 double or triple faults yet. */
1295 do_interrupt_all(cpu, cs->exception_index,
1296 env->exception_is_int,
1297 env->error_code,
1298 env->exception_next_eip, 0);
1299 /* successfully delivered */
1300 env->old_exception = -1;
1301 #endif
1304 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1306 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1309 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1311 X86CPU *cpu = X86_CPU(cs);
1312 CPUX86State *env = &cpu->env;
1313 bool ret = false;
1315 #if !defined(CONFIG_USER_ONLY)
1316 if (interrupt_request & CPU_INTERRUPT_POLL) {
1317 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1318 apic_poll_irq(cpu->apic_state);
1319 /* Don't process multiple interrupt requests in a single call.
1320 This is required to make icount-driven execution deterministic. */
1321 return true;
1323 #endif
1324 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1325 do_cpu_sipi(cpu);
1326 } else if (env->hflags2 & HF2_GIF_MASK) {
1327 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1328 !(env->hflags & HF_SMM_MASK)) {
1329 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1330 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1331 do_smm_enter(cpu);
1332 ret = true;
1333 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1334 !(env->hflags2 & HF2_NMI_MASK)) {
1335 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1336 env->hflags2 |= HF2_NMI_MASK;
1337 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1338 ret = true;
1339 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1340 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1341 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1342 ret = true;
1343 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1344 (((env->hflags2 & HF2_VINTR_MASK) &&
1345 (env->hflags2 & HF2_HIF_MASK)) ||
1346 (!(env->hflags2 & HF2_VINTR_MASK) &&
1347 (env->eflags & IF_MASK &&
1348 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1349 int intno;
1350 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1351 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1352 CPU_INTERRUPT_VIRQ);
1353 intno = cpu_get_pic_interrupt(env);
1354 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1355 "Servicing hardware INT=0x%02x\n", intno);
1356 do_interrupt_x86_hardirq(env, intno, 1);
1357 /* ensure that no TB jump will be modified as
1358 the program flow was changed */
1359 ret = true;
1360 #if !defined(CONFIG_USER_ONLY)
1361 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1362 (env->eflags & IF_MASK) &&
1363 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1364 int intno;
1365 /* FIXME: this should respect TPR */
1366 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1367 intno = x86_ldl_phys(cs, env->vm_vmcb
1368 + offsetof(struct vmcb, control.int_vector));
1369 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1370 "Servicing virtual hardware INT=0x%02x\n", intno);
1371 do_interrupt_x86_hardirq(env, intno, 1);
1372 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1373 ret = true;
1374 #endif
1378 return ret;
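/*
 * The if/else chain above roughly encodes the event priority used here:
 * SIPI first, then (with GIF set) SMI, NMI, machine check, external
 * interrupts and finally SVM virtual interrupts, the last two additionally
 * gated by IF and the interrupt shadow.
 */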
1381 void helper_enter_level(CPUX86State *env, int level, int data32,
1382 target_ulong t1)
1384 target_ulong ssp;
1385 uint32_t esp_mask, esp, ebp;
1387 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1388 ssp = env->segs[R_SS].base;
1389 ebp = env->regs[R_EBP];
1390 esp = env->regs[R_ESP];
1391 if (data32) {
1392 /* 32 bit */
1393 esp -= 4;
1394 while (--level) {
1395 esp -= 4;
1396 ebp -= 4;
1397 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1398 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1399 GETPC()),
1400 GETPC());
1402 esp -= 4;
1403 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1404 } else {
1405 /* 16 bit */
1406 esp -= 2;
1407 while (--level) {
1408 esp -= 2;
1409 ebp -= 2;
1410 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1411 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1412 GETPC()),
1413 GETPC());
1415 esp -= 2;
1416 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1420 #ifdef TARGET_X86_64
1421 void helper_enter64_level(CPUX86State *env, int level, int data64,
1422 target_ulong t1)
1424 target_ulong esp, ebp;
1426 ebp = env->regs[R_EBP];
1427 esp = env->regs[R_ESP];
1429 if (data64) {
1430 /* 64 bit */
1431 esp -= 8;
1432 while (--level) {
1433 esp -= 8;
1434 ebp -= 8;
1435 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1436 GETPC());
1438 esp -= 8;
1439 cpu_stq_data_ra(env, esp, t1, GETPC());
1440 } else {
1441 /* 16 bit */
1442 esp -= 2;
1443 while (--level) {
1444 esp -= 2;
1445 ebp -= 2;
1446 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1447 GETPC());
1449 esp -= 2;
1450 cpu_stw_data_ra(env, esp, t1, GETPC());
1453 #endif
1455 void helper_lldt(CPUX86State *env, int selector)
1457 SegmentCache *dt;
1458 uint32_t e1, e2;
1459 int index, entry_limit;
1460 target_ulong ptr;
1462 selector &= 0xffff;
1463 if ((selector & 0xfffc) == 0) {
1464 /* XXX: NULL selector case: invalid LDT */
1465 env->ldt.base = 0;
1466 env->ldt.limit = 0;
1467 } else {
1468 if (selector & 0x4) {
1469 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1471 dt = &env->gdt;
1472 index = selector & ~7;
1473 #ifdef TARGET_X86_64
1474 if (env->hflags & HF_LMA_MASK) {
1475 entry_limit = 15;
1476 } else
1477 #endif
1479 entry_limit = 7;
1481 if ((index + entry_limit) > dt->limit) {
1482 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1484 ptr = dt->base + index;
1485 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1486 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1487 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1488 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1490 if (!(e2 & DESC_P_MASK)) {
1491 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1493 #ifdef TARGET_X86_64
1494 if (env->hflags & HF_LMA_MASK) {
1495 uint32_t e3;
1497 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1498 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1499 env->ldt.base |= (target_ulong)e3 << 32;
1500 } else
1501 #endif
1503 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1506 env->ldt.selector = selector;
1509 void helper_ltr(CPUX86State *env, int selector)
1511 SegmentCache *dt;
1512 uint32_t e1, e2;
1513 int index, type, entry_limit;
1514 target_ulong ptr;
1516 selector &= 0xffff;
1517 if ((selector & 0xfffc) == 0) {
1518 /* NULL selector case: invalid TR */
1519 env->tr.base = 0;
1520 env->tr.limit = 0;
1521 env->tr.flags = 0;
1522 } else {
1523 if (selector & 0x4) {
1524 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1526 dt = &env->gdt;
1527 index = selector & ~7;
1528 #ifdef TARGET_X86_64
1529 if (env->hflags & HF_LMA_MASK) {
1530 entry_limit = 15;
1531 } else
1532 #endif
1534 entry_limit = 7;
1536 if ((index + entry_limit) > dt->limit) {
1537 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1539 ptr = dt->base + index;
1540 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1541 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1542 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1543 if ((e2 & DESC_S_MASK) ||
1544 (type != 1 && type != 9)) {
1545 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1547 if (!(e2 & DESC_P_MASK)) {
1548 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1550 #ifdef TARGET_X86_64
1551 if (env->hflags & HF_LMA_MASK) {
1552 uint32_t e3, e4;
1554 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1555 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1556 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1557 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1559 load_seg_cache_raw_dt(&env->tr, e1, e2);
1560 env->tr.base |= (target_ulong)e3 << 32;
1561 } else
1562 #endif
1564 load_seg_cache_raw_dt(&env->tr, e1, e2);
1566 e2 |= DESC_TSS_BUSY_MASK;
1567 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1569 env->tr.selector = selector;
1572 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1573 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1575 uint32_t e1, e2;
1576 int cpl, dpl, rpl;
1577 SegmentCache *dt;
1578 int index;
1579 target_ulong ptr;
1581 selector &= 0xffff;
1582 cpl = env->hflags & HF_CPL_MASK;
1583 if ((selector & 0xfffc) == 0) {
1584 /* null selector case */
1585 if (seg_reg == R_SS
1586 #ifdef TARGET_X86_64
1587 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1588 #endif
1590 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1592 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1593 } else {
1595 if (selector & 0x4) {
1596 dt = &env->ldt;
1597 } else {
1598 dt = &env->gdt;
1600 index = selector & ~7;
1601 if ((index + 7) > dt->limit) {
1602 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1604 ptr = dt->base + index;
1605 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1606 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1608 if (!(e2 & DESC_S_MASK)) {
1609 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1611 rpl = selector & 3;
1612 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1613 if (seg_reg == R_SS) {
1614 /* must be writable segment */
1615 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1616 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1618 if (rpl != cpl || dpl != cpl) {
1619 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1621 } else {
1622 /* must be readable segment */
1623 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1624 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1627 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1628 /* if not conforming code, test rights */
1629 if (dpl < cpl || dpl < rpl) {
1630 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1635 if (!(e2 & DESC_P_MASK)) {
1636 if (seg_reg == R_SS) {
1637 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1638 } else {
1639 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1643 /* set the access bit if not already set */
1644 if (!(e2 & DESC_A_MASK)) {
1645 e2 |= DESC_A_MASK;
1646 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1649 cpu_x86_load_seg_cache(env, seg_reg, selector,
1650 get_seg_base(e1, e2),
1651 get_seg_limit(e1, e2),
1652 e2);
1653 #if 0
1654 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1655 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1656 #endif
1660 /* protected mode jump */
1661 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1662 target_ulong next_eip)
1664 int gate_cs, type;
1665 uint32_t e1, e2, cpl, dpl, rpl, limit;
1667 if ((new_cs & 0xfffc) == 0) {
1668 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1670 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1671 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1673 cpl = env->hflags & HF_CPL_MASK;
1674 if (e2 & DESC_S_MASK) {
1675 if (!(e2 & DESC_CS_MASK)) {
1676 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1678 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1679 if (e2 & DESC_C_MASK) {
1680 /* conforming code segment */
1681 if (dpl > cpl) {
1682 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1684 } else {
1685 /* non conforming code segment */
1686 rpl = new_cs & 3;
1687 if (rpl > cpl) {
1688 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1690 if (dpl != cpl) {
1691 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1694 if (!(e2 & DESC_P_MASK)) {
1695 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1697 limit = get_seg_limit(e1, e2);
1698 if (new_eip > limit &&
1699 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1700 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1702 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1703 get_seg_base(e1, e2), limit, e2);
1704 env->eip = new_eip;
1705 } else {
1706 /* jump to call or task gate */
1707 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1708 rpl = new_cs & 3;
1709 cpl = env->hflags & HF_CPL_MASK;
1710 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1711 switch (type) {
1712 case 1: /* 286 TSS */
1713 case 9: /* 386 TSS */
1714 case 5: /* task gate */
1715 if (dpl < cpl || dpl < rpl) {
1716 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1718 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1719 break;
1720 case 4: /* 286 call gate */
1721 case 12: /* 386 call gate */
1722 if ((dpl < cpl) || (dpl < rpl)) {
1723 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1725 if (!(e2 & DESC_P_MASK)) {
1726 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1728 gate_cs = e1 >> 16;
1729 new_eip = (e1 & 0xffff);
1730 if (type == 12) {
1731 new_eip |= (e2 & 0xffff0000);
1733 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1734 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1736 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1737 /* must be code segment */
1738 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1739 (DESC_S_MASK | DESC_CS_MASK))) {
1740 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1742 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1743 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1744 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1746 if (!(e2 & DESC_P_MASK)) {
1747 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1749 limit = get_seg_limit(e1, e2);
1750 if (new_eip > limit) {
1751 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1753 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1754 get_seg_base(e1, e2), limit, e2);
1755 env->eip = new_eip;
1756 break;
1757 default:
1758 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1759 break;
1764 /* real mode call */
1765 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1766 int shift, int next_eip)
1768 int new_eip;
1769 uint32_t esp, esp_mask;
1770 target_ulong ssp;
1772 new_eip = new_eip1;
1773 esp = env->regs[R_ESP];
1774 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1775 ssp = env->segs[R_SS].base;
1776 if (shift) {
1777 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1778 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1779 } else {
1780 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1781 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1784 SET_ESP(esp, esp_mask);
1785 env->eip = new_eip;
1786 env->segs[R_CS].selector = new_cs;
1787 env->segs[R_CS].base = (new_cs << 4);
1790 /* protected mode call */
1791 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1792 int shift, target_ulong next_eip)
1794 int new_stack, i;
1795 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1796 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1797 uint32_t val, limit, old_sp_mask;
1798 target_ulong ssp, old_ssp;
1800 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1801 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1802 if ((new_cs & 0xfffc) == 0) {
1803 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1805 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1806 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1808 cpl = env->hflags & HF_CPL_MASK;
1809 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1810 if (e2 & DESC_S_MASK) {
1811 if (!(e2 & DESC_CS_MASK)) {
1812 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1814 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1815 if (e2 & DESC_C_MASK) {
1816 /* conforming code segment */
1817 if (dpl > cpl) {
1818 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1820 } else {
1821 /* non conforming code segment */
1822 rpl = new_cs & 3;
1823 if (rpl > cpl) {
1824 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1826 if (dpl != cpl) {
1827 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1830 if (!(e2 & DESC_P_MASK)) {
1831 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1834 #ifdef TARGET_X86_64
1835 /* XXX: check 16/32 bit cases in long mode */
1836 if (shift == 2) {
1837 target_ulong rsp;
1839 /* 64 bit case */
1840 rsp = env->regs[R_ESP];
1841 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1842 PUSHQ_RA(rsp, next_eip, GETPC());
1843 /* from this point, not restartable */
1844 env->regs[R_ESP] = rsp;
1845 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1846 get_seg_base(e1, e2),
1847 get_seg_limit(e1, e2), e2);
1848 env->eip = new_eip;
1849 } else
1850 #endif
1852 sp = env->regs[R_ESP];
1853 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1854 ssp = env->segs[R_SS].base;
1855 if (shift) {
1856 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1857 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1858 } else {
1859 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1860 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1863 limit = get_seg_limit(e1, e2);
1864 if (new_eip > limit) {
1865 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1867 /* from this point, not restartable */
1868 SET_ESP(sp, sp_mask);
1869 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1870 get_seg_base(e1, e2), limit, e2);
1871 env->eip = new_eip;
1873 } else {
1874 /* check gate type */
1875 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1876 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1877 rpl = new_cs & 3;
1878 switch (type) {
1879 case 1: /* available 286 TSS */
1880 case 9: /* available 386 TSS */
1881 case 5: /* task gate */
1882 if (dpl < cpl || dpl < rpl) {
1883 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1885 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1886 return;
1887 case 4: /* 286 call gate */
1888 case 12: /* 386 call gate */
1889 break;
1890 default:
1891 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1892 break;
1894 shift = type >> 3;
1896 if (dpl < cpl || dpl < rpl) {
1897 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1899 /* check valid bit */
1900 if (!(e2 & DESC_P_MASK)) {
1901 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1903 selector = e1 >> 16;
1904 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1905 param_count = e2 & 0x1f;
1906 if ((selector & 0xfffc) == 0) {
1907 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1910 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1911 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1913 if (!(e2 & DESC_S_MASK) || !(e2 & DESC_CS_MASK)) {
1914 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1916 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1917 if (dpl > cpl) {
1918 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1920 if (!(e2 & DESC_P_MASK)) {
1921 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
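/* A call gate targeting a more privileged non-conforming segment
   switches to the inner-privilege stack taken from the TSS and copies
   param_count parameters from the caller's stack; otherwise the call
   proceeds on the current stack. */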
1924 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1925 /* to inner privilege */
1926 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1927 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1928 TARGET_FMT_lx "\n", ss, sp, param_count,
1929 env->regs[R_ESP]);
1930 if ((ss & 0xfffc) == 0) {
1931 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1933 if ((ss & 3) != dpl) {
1934 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1936 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1937 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1939 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1940 if (ss_dpl != dpl) {
1941 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1943 if (!(ss_e2 & DESC_S_MASK) ||
1944 (ss_e2 & DESC_CS_MASK) ||
1945 !(ss_e2 & DESC_W_MASK)) {
1946 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1948 if (!(ss_e2 & DESC_P_MASK)) {
1949 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1952 /* push_size = ((param_count * 2) + 8) << shift; */
1954 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1955 old_ssp = env->segs[R_SS].base;
1957 sp_mask = get_sp_mask(ss_e2);
1958 ssp = get_seg_base(ss_e1, ss_e2);
1959 if (shift) {
1960 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1961 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1962 for (i = param_count - 1; i >= 0; i--) {
1963 val = cpu_ldl_kernel_ra(env, old_ssp +
1964 ((env->regs[R_ESP] + i * 4) &
1965 old_sp_mask), GETPC());
1966 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1968 } else {
1969 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1970 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1971 for (i = param_count - 1; i >= 0; i--) {
1972 val = cpu_lduw_kernel_ra(env, old_ssp +
1973 ((env->regs[R_ESP] + i * 2) &
1974 old_sp_mask), GETPC());
1975 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1978 new_stack = 1;
1979 } else {
1980 /* to same privilege */
1981 sp = env->regs[R_ESP];
1982 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1983 ssp = env->segs[R_SS].base;
1984 /* push_size = (4 << shift); */
1985 new_stack = 0;
1988 if (shift) {
1989 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1990 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1991 } else {
1992 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1993 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1996 /* from this point, not restartable */
1998 if (new_stack) {
1999 ss = (ss & ~3) | dpl;
2000 cpu_x86_load_seg_cache(env, R_SS, ss,
2001 ssp,
2002 get_seg_limit(ss_e1, ss_e2),
2003 ss_e2);
2006 selector = (selector & ~3) | dpl;
2007 cpu_x86_load_seg_cache(env, R_CS, selector,
2008 get_seg_base(e1, e2),
2009 get_seg_limit(e1, e2),
2010 e2);
2011 SET_ESP(sp, sp_mask);
2012 env->eip = offset;
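/* IRET in real or vm86 mode pops EIP, CS and EFLAGS from the stack.
   In vm86 mode IOPL is not writable from CPL 3, so IOPL_MASK is left
   out of the EFLAGS update mask. */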
2016 /* real and vm86 mode iret */
2017 void helper_iret_real(CPUX86State *env, int shift)
2019 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2020 target_ulong ssp;
2021 int eflags_mask;
2023 sp_mask = 0xffff; /* XXX: use SS segment size? */
2024 sp = env->regs[R_ESP];
2025 ssp = env->segs[R_SS].base;
2026 if (shift == 1) {
2027 /* 32 bits */
2028 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2029 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2030 new_cs &= 0xffff;
2031 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2032 } else {
2033 /* 16 bits */
2034 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2035 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2036 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2038 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2039 env->segs[R_CS].selector = new_cs;
2040 env->segs[R_CS].base = (new_cs << 4);
2041 env->eip = new_eip;
2042 if (env->eflags & VM_MASK) {
2043 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2044 NT_MASK;
2045 } else {
2046 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2047 RF_MASK | NT_MASK;
2049 if (shift == 0) {
2050 eflags_mask &= 0xffff;
2052 cpu_load_eflags(env, new_eflags, eflags_mask);
2053 env->hflags2 &= ~HF2_NMI_MASK;
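/* When returning to an outer privilege level, data segment registers
   whose descriptor DPL is lower than the new CPL must not stay usable;
   validate_seg() nulls such selectors, as real CPUs do on RET/IRET to
   less privileged code. */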
2056 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2058 int dpl;
2059 uint32_t e2;
2061 /* XXX: on x86_64, we do not want to nullify FS and GS because
2062 they may still contain a valid base. It is not clear how a real
2063 x86_64 CPU behaves here. */
2064 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2065 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2066 return;
2069 e2 = env->segs[seg_reg].flags;
2070 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2071 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2072 /* data or non conforming code segment */
2073 if (dpl < cpl) {
2074 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
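/* Common implementation of protected-mode far returns: 'shift' selects
   the operand size (0 = 16 bit, 1 = 32 bit, 2 = 64 bit), 'is_iret'
   distinguishes IRET (which also pops EFLAGS and may return to vm86)
   from RETF, and 'addend' is the extra stack adjustment of RET imm16. */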
2079 /* protected mode iret */
2080 static inline void helper_ret_protected(CPUX86State *env, int shift,
2081 int is_iret, int addend,
2082 uintptr_t retaddr)
2084 uint32_t new_cs, new_eflags, new_ss;
2085 uint32_t new_es, new_ds, new_fs, new_gs;
2086 uint32_t e1, e2, ss_e1, ss_e2;
2087 int cpl, dpl, rpl, eflags_mask, iopl;
2088 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2090 #ifdef TARGET_X86_64
2091 if (shift == 2) {
2092 sp_mask = -1;
2093 } else
2094 #endif
2096 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2098 sp = env->regs[R_ESP];
2099 ssp = env->segs[R_SS].base;
2100 new_eflags = 0; /* avoid warning */
2101 #ifdef TARGET_X86_64
2102 if (shift == 2) {
2103 POPQ_RA(sp, new_eip, retaddr);
2104 POPQ_RA(sp, new_cs, retaddr);
2105 new_cs &= 0xffff;
2106 if (is_iret) {
2107 POPQ_RA(sp, new_eflags, retaddr);
2109 } else
2110 #endif
2112 if (shift == 1) {
2113 /* 32 bits */
2114 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2115 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2116 new_cs &= 0xffff;
2117 if (is_iret) {
2118 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2119 if (new_eflags & VM_MASK) {
2120 goto return_to_vm86;
2123 } else {
2124 /* 16 bits */
2125 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2126 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2127 if (is_iret) {
2128 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2132 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2133 new_cs, new_eip, shift, addend);
2134 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2135 if ((new_cs & 0xfffc) == 0) {
2136 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2138 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2139 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2141 if (!(e2 & DESC_S_MASK) ||
2142 !(e2 & DESC_CS_MASK)) {
2143 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2145 cpl = env->hflags & HF_CPL_MASK;
2146 rpl = new_cs & 3;
2147 if (rpl < cpl) {
2148 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2150 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2151 if (e2 & DESC_C_MASK) {
2152 if (dpl > rpl) {
2153 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2155 } else {
2156 if (dpl != rpl) {
2157 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2160 if (!(e2 & DESC_P_MASK)) {
2161 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2164 sp += addend;
2165 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2166 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2167 /* return to same privilege level */
2168 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2169 get_seg_base(e1, e2),
2170 get_seg_limit(e1, e2),
2171 e2);
2172 } else {
2173 /* return to different privilege level */
2174 #ifdef TARGET_X86_64
2175 if (shift == 2) {
2176 POPQ_RA(sp, new_esp, retaddr);
2177 POPQ_RA(sp, new_ss, retaddr);
2178 new_ss &= 0xffff;
2179 } else
2180 #endif
2182 if (shift == 1) {
2183 /* 32 bits */
2184 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2185 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2186 new_ss &= 0xffff;
2187 } else {
2188 /* 16 bits */
2189 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2190 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2193 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2194 new_ss, new_esp);
2195 if ((new_ss & 0xfffc) == 0) {
2196 #ifdef TARGET_X86_64
2197 /* NULL ss is allowed in long mode if cpl != 3 */
2198 /* XXX: test CS64? */
2199 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2200 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2201 0, 0xffffffff,
2202 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2203 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2204 DESC_W_MASK | DESC_A_MASK);
2205 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2206 } else
2207 #endif
2209 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2211 } else {
2212 if ((new_ss & 3) != rpl) {
2213 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2215 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2216 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2218 if (!(ss_e2 & DESC_S_MASK) ||
2219 (ss_e2 & DESC_CS_MASK) ||
2220 !(ss_e2 & DESC_W_MASK)) {
2221 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2223 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2224 if (dpl != rpl) {
2225 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2227 if (!(ss_e2 & DESC_P_MASK)) {
2228 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2230 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2231 get_seg_base(ss_e1, ss_e2),
2232 get_seg_limit(ss_e1, ss_e2),
2233 ss_e2);
2236 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2237 get_seg_base(e1, e2),
2238 get_seg_limit(e1, e2),
2239 e2);
2240 sp = new_esp;
2241 #ifdef TARGET_X86_64
2242 if (env->hflags & HF_CS64_MASK) {
2243 sp_mask = -1;
2244 } else
2245 #endif
2247 sp_mask = get_sp_mask(ss_e2);
2250 /* validate data segments */
2251 validate_seg(env, R_ES, rpl);
2252 validate_seg(env, R_DS, rpl);
2253 validate_seg(env, R_FS, rpl);
2254 validate_seg(env, R_GS, rpl);
2256 sp += addend;
2258 SET_ESP(sp, sp_mask);
2259 env->eip = new_eip;
2260 if (is_iret) {
2261 /* NOTE: 'cpl' is the _old_ CPL */
2262 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2263 if (cpl == 0) {
2264 eflags_mask |= IOPL_MASK;
2266 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2267 if (cpl <= iopl) {
2268 eflags_mask |= IF_MASK;
2270 if (shift == 0) {
2271 eflags_mask &= 0xffff;
2273 cpu_load_eflags(env, new_eflags, eflags_mask);
2275 return;
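/* IRET from protected mode back to vm86: the 32-bit frame additionally
   holds ESP, SS, ES, DS, FS and GS, which are reloaded below as
   vm86-style segments. */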
2277 return_to_vm86:
2278 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2279 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2280 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2281 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2282 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2283 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2285 /* modify processor state */
2286 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2287 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2288 VIP_MASK);
2289 load_seg_vm(env, R_CS, new_cs & 0xffff);
2290 load_seg_vm(env, R_SS, new_ss & 0xffff);
2291 load_seg_vm(env, R_ES, new_es & 0xffff);
2292 load_seg_vm(env, R_DS, new_ds & 0xffff);
2293 load_seg_vm(env, R_FS, new_fs & 0xffff);
2294 load_seg_vm(env, R_GS, new_gs & 0xffff);
2296 env->eip = new_eip & 0xffff;
2297 env->regs[R_ESP] = new_esp;
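/* IRET with EFLAGS.NT set returns through the back link stored at the
   start of the current TSS (a task switch); otherwise the normal
   protected-mode return path above is used. Nested-task returns are
   not allowed in long mode. */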
2300 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2302 int tss_selector, type;
2303 uint32_t e1, e2;
2305 /* special case for TSS */
2306 if (env->eflags & NT_MASK) {
2307 #ifdef TARGET_X86_64
2308 if (env->hflags & HF_LMA_MASK) {
2309 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2311 #endif
2312 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2313 if (tss_selector & 4) {
2314 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2316 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2317 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2319 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2320 /* NOTE: the 0x17 mask accepts both the 286 and 386 busy TSS (types 3 and 11) */
2321 if (type != 3) {
2322 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2324 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2325 } else {
2326 helper_ret_protected(env, shift, 1, 0, GETPC());
2328 env->hflags2 &= ~HF2_NMI_MASK;
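/* Far RET: like IRET without the EFLAGS pop, plus the RET imm16 stack
   adjustment passed in 'addend'. */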
2331 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2333 helper_ret_protected(env, shift, 0, addend, GETPC());
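/* SYSENTER loads flat ring-0 CS and SS from IA32_SYSENTER_CS (SS is
   CS + 8), takes the new ESP/EIP from the SYSENTER_ESP/SYSENTER_EIP
   MSRs and clears VM, IF and RF; a zero SYSENTER_CS raises #GP(0). */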
2336 void helper_sysenter(CPUX86State *env)
2338 if (env->sysenter_cs == 0) {
2339 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2341 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2343 #ifdef TARGET_X86_64
2344 if (env->hflags & HF_LMA_MASK) {
2345 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2346 0, 0xffffffff,
2347 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2348 DESC_S_MASK |
2349 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2350 DESC_L_MASK);
2351 } else
2352 #endif
2354 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2355 0, 0xffffffff,
2356 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2357 DESC_S_MASK |
2358 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2360 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2361 0, 0xffffffff,
2362 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2363 DESC_S_MASK |
2364 DESC_W_MASK | DESC_A_MASK);
2365 env->regs[R_ESP] = env->sysenter_esp;
2366 env->eip = env->sysenter_eip;
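/* SYSEXIT returns to CPL 3 with CS/SS derived from IA32_SYSENTER_CS
   (+16/+24 for the 32-bit form, +32/+40 for the 64-bit form) and takes
   the new ESP/EIP from ECX/EDX; it raises #GP(0) when executed outside
   CPL 0 or with a zero SYSENTER_CS. */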
2369 void helper_sysexit(CPUX86State *env, int dflag)
2371 int cpl;
2373 cpl = env->hflags & HF_CPL_MASK;
2374 if (env->sysenter_cs == 0 || cpl != 0) {
2375 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2377 #ifdef TARGET_X86_64
2378 if (dflag == 2) {
2379 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2380 3, 0, 0xffffffff,
2381 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2382 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2383 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2384 DESC_L_MASK);
2385 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2386 3, 0, 0xffffffff,
2387 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2388 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2389 DESC_W_MASK | DESC_A_MASK);
2390 } else
2391 #endif
2393 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2394 3, 0, 0xffffffff,
2395 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2396 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2397 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2398 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2399 3, 0, 0xffffffff,
2400 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2401 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2402 DESC_W_MASK | DESC_A_MASK);
2404 env->regs[R_ESP] = env->regs[R_ECX];
2405 env->eip = env->regs[R_EDX];
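/* LSL: return the (granularity-expanded) segment limit and set ZF on
   success; selector, privilege or type check failures clear ZF and
   return 0 instead of faulting. */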
2408 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2410 unsigned int limit;
2411 uint32_t e1, e2, eflags, selector;
2412 int rpl, dpl, cpl, type;
2414 selector = selector1 & 0xffff;
2415 eflags = cpu_cc_compute_all(env, CC_OP);
2416 if ((selector & 0xfffc) == 0) {
2417 goto fail;
2419 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2420 goto fail;
2422 rpl = selector & 3;
2423 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2424 cpl = env->hflags & HF_CPL_MASK;
2425 if (e2 & DESC_S_MASK) {
2426 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2427 /* conforming */
2428 } else {
2429 if (dpl < cpl || dpl < rpl) {
2430 goto fail;
2433 } else {
2434 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2435 switch (type) {
2436 case 1:
2437 case 2:
2438 case 3:
2439 case 9:
2440 case 11:
2441 break;
2442 default:
2443 goto fail;
2445 if (dpl < cpl || dpl < rpl) {
2446 fail:
2447 CC_SRC = eflags & ~CC_Z;
2448 return 0;
2451 limit = get_seg_limit(e1, e2);
2452 CC_SRC = eflags | CC_Z;
2453 return limit;
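/* LAR: return the descriptor access-rights bytes (masked with
   0x00f0ff00) and set ZF, or clear ZF on failure. More descriptor
   types are accepted than for LSL, including call and task gates. */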
2456 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2458 uint32_t e1, e2, eflags, selector;
2459 int rpl, dpl, cpl, type;
2461 selector = selector1 & 0xffff;
2462 eflags = cpu_cc_compute_all(env, CC_OP);
2463 if ((selector & 0xfffc) == 0) {
2464 goto fail;
2466 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2467 goto fail;
2469 rpl = selector & 3;
2470 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2471 cpl = env->hflags & HF_CPL_MASK;
2472 if (e2 & DESC_S_MASK) {
2473 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2474 /* conforming */
2475 } else {
2476 if (dpl < cpl || dpl < rpl) {
2477 goto fail;
2480 } else {
2481 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2482 switch (type) {
2483 case 1:
2484 case 2:
2485 case 3:
2486 case 4:
2487 case 5:
2488 case 9:
2489 case 11:
2490 case 12:
2491 break;
2492 default:
2493 goto fail;
2495 if (dpl < cpl || dpl < rpl) {
2496 fail:
2497 CC_SRC = eflags & ~CC_Z;
2498 return 0;
2501 CC_SRC = eflags | CC_Z;
2502 return e2 & 0x00f0ff00;
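/* VERR/VERW set ZF if the selected segment is readable (respectively
   writable) at the current CPL and RPL; like LAR/LSL they report
   failure through ZF instead of faulting. */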
2505 void helper_verr(CPUX86State *env, target_ulong selector1)
2507 uint32_t e1, e2, eflags, selector;
2508 int rpl, dpl, cpl;
2510 selector = selector1 & 0xffff;
2511 eflags = cpu_cc_compute_all(env, CC_OP);
2512 if ((selector & 0xfffc) == 0) {
2513 goto fail;
2515 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2516 goto fail;
2518 if (!(e2 & DESC_S_MASK)) {
2519 goto fail;
2521 rpl = selector & 3;
2522 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2523 cpl = env->hflags & HF_CPL_MASK;
2524 if (e2 & DESC_CS_MASK) {
2525 if (!(e2 & DESC_R_MASK)) {
2526 goto fail;
2528 if (!(e2 & DESC_C_MASK)) {
2529 if (dpl < cpl || dpl < rpl) {
2530 goto fail;
2533 } else {
2534 if (dpl < cpl || dpl < rpl) {
2535 fail:
2536 CC_SRC = eflags & ~CC_Z;
2537 return;
2540 CC_SRC = eflags | CC_Z;
2543 void helper_verw(CPUX86State *env, target_ulong selector1)
2545 uint32_t e1, e2, eflags, selector;
2546 int rpl, dpl, cpl;
2548 selector = selector1 & 0xffff;
2549 eflags = cpu_cc_compute_all(env, CC_OP);
2550 if ((selector & 0xfffc) == 0) {
2551 goto fail;
2553 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2554 goto fail;
2556 if (!(e2 & DESC_S_MASK)) {
2557 goto fail;
2559 rpl = selector & 3;
2560 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2561 cpl = env->hflags & HF_CPL_MASK;
2562 if (e2 & DESC_CS_MASK) {
2563 goto fail;
2564 } else {
2565 if (dpl < cpl || dpl < rpl) {
2566 goto fail;
2568 if (!(e2 & DESC_W_MASK)) {
2569 fail:
2570 CC_SRC = eflags & ~CC_Z;
2571 return;
2574 CC_SRC = eflags | CC_Z;
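/* User-mode-only segment load: outside protected mode (or in vm86
   mode) a selector simply maps to base = selector << 4; otherwise the
   full protected-mode load path is taken. */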
2577 #if defined(CONFIG_USER_ONLY)
2578 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2580 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2581 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2582 selector &= 0xffff;
2583 cpu_x86_load_seg_cache(env, seg_reg, selector,
2584 (selector << 4), 0xffff,
2585 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2586 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2587 } else {
2588 helper_load_seg(env, seg_reg, selector);
2591 #endif
2593 /* check if Port I/O is allowed in TSS */
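/* The 32-bit TSS holds an I/O permission bitmap whose start offset is
   the 16-bit value at TSS offset 0x66. Each bit covers one port; every
   bit spanned by the access must be clear and lie within the TSS
   limit, otherwise #GP(0) is raised. */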
2594 static inline void check_io(CPUX86State *env, int addr, int size,
2595 uintptr_t retaddr)
2597 int io_offset, val, mask;
2599 /* the TSS must be a valid, present 32-bit TSS */
2600 if (!(env->tr.flags & DESC_P_MASK) ||
2601 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2602 env->tr.limit < 103) {
2603 goto fail;
2605 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2606 io_offset += (addr >> 3);
2607 /* Note: the check needs two bytes */
2608 if ((io_offset + 1) > env->tr.limit) {
2609 goto fail;
2611 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2612 val >>= (addr & 7);
2613 mask = (1 << size) - 1;
2614 /* all bits must be zero to allow the I/O */
2615 if ((val & mask) != 0) {
2616 fail:
2617 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
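/* Byte/word/long variants used by the translated IN/OUT code when the
   access has to be validated against the TSS I/O bitmap (typically
   when CPL > IOPL or in vm86 mode). */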
2621 void helper_check_iob(CPUX86State *env, uint32_t t0)
2623 check_io(env, t0, 1, GETPC());
2626 void helper_check_iow(CPUX86State *env, uint32_t t0)
2628 check_io(env, t0, 2, GETPC());
2631 void helper_check_iol(CPUX86State *env, uint32_t t0)
2633 check_io(env, t0, 4, GETPC());