target-i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
29 //#define DEBUG_PCALL
31 #ifdef DEBUG_PCALL
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu) \
34 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 #else
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
38 #endif
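/* The template includes below generate the cpu_{ld,st}*_kernel() accessors
   used in this file for descriptor-table and TSS accesses: user-only builds
   access guest memory directly, while system builds go through the
   kernel-mode MMU index. */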
40 #ifdef CONFIG_USER_ONLY
41 #define MEMSUFFIX _kernel
42 #define DATA_SIZE 1
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 2
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 4
49 #include "exec/cpu_ldst_useronly_template.h"
51 #define DATA_SIZE 8
52 #include "exec/cpu_ldst_useronly_template.h"
53 #undef MEMSUFFIX
54 #else
55 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
56 #define MEMSUFFIX _kernel
57 #define DATA_SIZE 1
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 2
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 4
64 #include "exec/cpu_ldst_template.h"
66 #define DATA_SIZE 8
67 #include "exec/cpu_ldst_template.h"
68 #undef CPU_MMU_INDEX
69 #undef MEMSUFFIX
70 #endif
72 /* return non-zero on error */
73 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
74 uint32_t *e2_ptr, int selector,
75 uintptr_t retaddr)
77 SegmentCache *dt;
78 int index;
79 target_ulong ptr;
81 if (selector & 0x4) {
82 dt = &env->ldt;
83 } else {
84 dt = &env->gdt;
86 index = selector & ~7;
87 if ((index + 7) > dt->limit) {
88 return -1;
90 ptr = dt->base + index;
91 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
92 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
93 return 0;
96 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
97 uint32_t *e2_ptr, int selector)
99 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
102 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
104 unsigned int limit;
106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
107 if (e2 & DESC_G_MASK) {
108 limit = (limit << 12) | 0xfff;
110 return limit;
113 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
115 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
118 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
119 uint32_t e2)
121 sc->base = get_seg_base(e1, e2);
122 sc->limit = get_seg_limit(e1, e2);
123 sc->flags = e2;
126 /* init the segment cache in vm86 mode. */
127 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
129 selector &= 0xffff;
131 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
132 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
133 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
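/* Fetch the stack pointer (SS:ESP) for privilege level 'dpl' from the
   current TSS, handling both the 16-bit and 32-bit TSS formats. */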
136 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
137 uint32_t *esp_ptr, int dpl,
138 uintptr_t retaddr)
140 X86CPU *cpu = x86_env_get_cpu(env);
141 int type, index, shift;
143 #if 0
145 int i;
146 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
147 for (i = 0; i < env->tr.limit; i++) {
148 printf("%02x ", env->tr.base[i]);
149 if ((i & 7) == 7) {
150 printf("\n");
153 printf("\n");
155 #endif
157 if (!(env->tr.flags & DESC_P_MASK)) {
158 cpu_abort(CPU(cpu), "invalid tss");
160 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
161 if ((type & 7) != 1) {
162 cpu_abort(CPU(cpu), "invalid tss type");
164 shift = type >> 3;
165 index = (dpl * 4 + 2) << shift;
166 if (index + (4 << shift) - 1 > env->tr.limit) {
167 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
169 if (shift == 0) {
170 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
172 } else {
173 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
174 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
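/* Load a segment register as part of a task switch: invalid descriptors
   raise #TS with the faulting selector, a non-present segment raises #NP. */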
178 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
179 uintptr_t retaddr)
181 uint32_t e1, e2;
182 int rpl, dpl;
184 if ((selector & 0xfffc) != 0) {
185 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 if (!(e2 & DESC_S_MASK)) {
189 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191 rpl = selector & 3;
192 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
193 if (seg_reg == R_CS) {
194 if (!(e2 & DESC_CS_MASK)) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 if (dpl != rpl) {
198 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
200 } else if (seg_reg == R_SS) {
201 /* SS must be writable data */
202 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 if (dpl != cpl || dpl != rpl) {
206 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
208 } else {
209 /* not readable code */
210 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
211 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
213 /* if data or non-conforming code, check the rights */
214 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
215 if (dpl < cpl || dpl < rpl) {
216 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
220 if (!(e2 & DESC_P_MASK)) {
221 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
223 cpu_x86_load_seg_cache(env, seg_reg, selector,
224 get_seg_base(e1, e2),
225 get_seg_limit(e1, e2),
226 e2);
227 } else {
228 if (seg_reg == R_SS || seg_reg == R_CS) {
229 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
234 #define SWITCH_TSS_JMP 0
235 #define SWITCH_TSS_IRET 1
236 #define SWITCH_TSS_CALL 2
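/* Perform a hardware task switch to the TSS selected by 'tss_selector'
   (descriptor words e1/e2): save the current context into the old TSS and
   load the new one. 'source' distinguishes JMP, IRET and CALL semantics. */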
238 /* XXX: restore CPU state in registers (PowerPC case) */
239 static void switch_tss_ra(CPUX86State *env, int tss_selector,
240 uint32_t e1, uint32_t e2, int source,
241 uint32_t next_eip, uintptr_t retaddr)
243 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
244 target_ulong tss_base;
245 uint32_t new_regs[8], new_segs[6];
246 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
247 uint32_t old_eflags, eflags_mask;
248 SegmentCache *dt;
249 int index;
250 target_ulong ptr;
252 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
253 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
254 source);
256 /* if task gate, we read the TSS segment and we load it */
257 if (type == 5) {
258 if (!(e2 & DESC_P_MASK)) {
259 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
261 tss_selector = e1 >> 16;
262 if (tss_selector & 4) {
263 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
265 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 if (e2 & DESC_S_MASK) {
269 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
271 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
272 if ((type & 7) != 1) {
273 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
277 if (!(e2 & DESC_P_MASK)) {
278 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
281 if (type & 8) {
282 tss_limit_max = 103;
283 } else {
284 tss_limit_max = 43;
286 tss_limit = get_seg_limit(e1, e2);
287 tss_base = get_seg_base(e1, e2);
288 if ((tss_selector & 4) != 0 ||
289 tss_limit < tss_limit_max) {
290 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
292 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
293 if (old_type & 8) {
294 old_tss_limit_max = 103;
295 } else {
296 old_tss_limit_max = 43;
299 /* read all the registers from the new TSS */
300 if (type & 8) {
301 /* 32 bit */
302 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
303 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
304 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
305 for (i = 0; i < 8; i++) {
306 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
307 retaddr);
309 for (i = 0; i < 6; i++) {
310 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
311 retaddr);
313 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
314 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
315 } else {
316 /* 16 bit */
317 new_cr3 = 0;
318 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
319 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
320 for (i = 0; i < 8; i++) {
321 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
322 retaddr) | 0xffff0000;
324 for (i = 0; i < 4; i++) {
325 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
326 retaddr);
328 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
329 new_segs[R_FS] = 0;
330 new_segs[R_GS] = 0;
331 new_trap = 0;
333 /* XXX: avoid a compiler warning, see
334 http://support.amd.com/us/Processor_TechDocs/24593.pdf
335 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
336 (void)new_trap;
338 /* NOTE: we must avoid memory exceptions during the task switch,
339 so we make dummy accesses beforehand */
340 /* XXX: it can still fail in some cases, so a bigger hack is
341 necessary to validate the TLB after having done the accesses */
343 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
344 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
345 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
346 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
348 /* clear busy bit (it is restartable) */
349 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
350 target_ulong ptr;
351 uint32_t e2;
353 ptr = env->gdt.base + (env->tr.selector & ~7);
354 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
355 e2 &= ~DESC_TSS_BUSY_MASK;
356 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
358 old_eflags = cpu_compute_eflags(env);
359 if (source == SWITCH_TSS_IRET) {
360 old_eflags &= ~NT_MASK;
363 /* save the current state in the old TSS */
364 if (type & 8) {
365 /* 32 bit */
366 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
374 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
375 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
376 for (i = 0; i < 6; i++) {
377 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
378 env->segs[i].selector, retaddr);
380 } else {
381 /* 16 bit */
382 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
390 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
391 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
392 for (i = 0; i < 4; i++) {
393 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
394 env->segs[i].selector, retaddr);
398 /* now if an exception occurs, it will occur in the next task
399 context */
401 if (source == SWITCH_TSS_CALL) {
402 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
403 new_eflags |= NT_MASK;
406 /* set busy bit */
407 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
408 target_ulong ptr;
409 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
413 e2 |= DESC_TSS_BUSY_MASK;
414 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
430 /* load all registers without raising an exception, then reload the
431 segment registers, which may raise exceptions */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8)) {
436 eflags_mask &= 0xffff;
438 cpu_load_eflags(env, new_eflags, eflags_mask);
439 /* XXX: what to do in 16 bit case? */
440 env->regs[R_EAX] = new_regs[0];
441 env->regs[R_ECX] = new_regs[1];
442 env->regs[R_EDX] = new_regs[2];
443 env->regs[R_EBX] = new_regs[3];
444 env->regs[R_ESP] = new_regs[4];
445 env->regs[R_EBP] = new_regs[5];
446 env->regs[R_ESI] = new_regs[6];
447 env->regs[R_EDI] = new_regs[7];
448 if (new_eflags & VM_MASK) {
449 for (i = 0; i < 6; i++) {
450 load_seg_vm(env, i, new_segs[i]);
452 } else {
453 /* first just selectors as the rest may trigger exceptions */
454 for (i = 0; i < 6; i++) {
455 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
459 env->ldt.selector = new_ldt & ~4;
460 env->ldt.base = 0;
461 env->ldt.limit = 0;
462 env->ldt.flags = 0;
464 /* load the LDT */
465 if (new_ldt & 4) {
466 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit) {
473 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475 ptr = dt->base + index;
476 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
477 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
478 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 if (!(e2 & DESC_P_MASK)) {
482 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
484 load_seg_cache_raw_dt(&env->ldt, e1, e2);
487 /* load the segments */
488 if (!(new_eflags & VM_MASK)) {
489 int cpl = new_segs[R_CS] & 3;
490 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
491 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
492 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
493 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
494 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
495 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
498 /* check that env->eip is in the CS segment limits */
499 if (new_eip > env->segs[R_CS].limit) {
500 /* XXX: different exception if CALL? */
501 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
504 #ifndef CONFIG_USER_ONLY
505 /* reset local breakpoints */
506 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
507 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
509 #endif
512 static void switch_tss(CPUX86State *env, int tss_selector,
513 uint32_t e1, uint32_t e2, int source,
514 uint32_t next_eip)
516 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
519 static inline unsigned int get_sp_mask(unsigned int e2)
521 if (e2 & DESC_B_MASK) {
522 return 0xffffffff;
523 } else {
524 return 0xffff;
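/* Exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF, #AC. */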
528 static int exception_has_error_code(int intno)
530 switch (intno) {
531 case 8:
532 case 10:
533 case 11:
534 case 12:
535 case 13:
536 case 14:
537 case 17:
538 return 1;
540 return 0;
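/* SET_ESP only updates the bits of ESP selected by sp_mask.  The 64-bit
   build special-cases the common masks: a 16-bit mask preserves the upper
   bits, a 32-bit mask zero-extends into RSP, and any other mask stores the
   full value. */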
543 #ifdef TARGET_X86_64
544 #define SET_ESP(val, sp_mask) \
545 do { \
546 if ((sp_mask) == 0xffff) { \
547 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
548 ((val) & 0xffff); \
549 } else if ((sp_mask) == 0xffffffffLL) { \
550 env->regs[R_ESP] = (uint32_t)(val); \
551 } else { \
552 env->regs[R_ESP] = (val); \
554 } while (0)
555 #else
556 #define SET_ESP(val, sp_mask) \
557 do { \
558 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
559 ((val) & (sp_mask)); \
560 } while (0)
561 #endif
563 /* in 64-bit machines, this can overflow. So this segment addition macro
564 * can be used to trim the value to 32-bit whenever needed */
565 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
567 /* XXX: add a is_user flag to have proper security support */
568 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
570 sp -= 2; \
571 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
574 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
576 sp -= 4; \
577 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
580 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
582 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
583 sp += 2; \
586 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
588 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
589 sp += 4; \
592 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
593 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
594 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
595 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
597 /* protected mode interrupt */
598 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
599 int error_code, unsigned int next_eip,
600 int is_hw)
602 SegmentCache *dt;
603 target_ulong ptr, ssp;
604 int type, dpl, selector, ss_dpl, cpl;
605 int has_error_code, new_stack, shift;
606 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
607 uint32_t old_eip, sp_mask;
608 int vm86 = env->eflags & VM_MASK;
610 has_error_code = 0;
611 if (!is_int && !is_hw) {
612 has_error_code = exception_has_error_code(intno);
614 if (is_int) {
615 old_eip = next_eip;
616 } else {
617 old_eip = env->eip;
620 dt = &env->idt;
621 if (intno * 8 + 7 > dt->limit) {
622 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
624 ptr = dt->base + intno * 8;
625 e1 = cpu_ldl_kernel(env, ptr);
626 e2 = cpu_ldl_kernel(env, ptr + 4);
627 /* check gate type */
628 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
629 switch (type) {
630 case 5: /* task gate */
631 /* must do that check here to return the correct error code */
632 if (!(e2 & DESC_P_MASK)) {
633 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
635 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
636 if (has_error_code) {
637 int type;
638 uint32_t mask;
640 /* push the error code */
641 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
642 shift = type >> 3;
643 if (env->segs[R_SS].flags & DESC_B_MASK) {
644 mask = 0xffffffff;
645 } else {
646 mask = 0xffff;
648 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
649 ssp = env->segs[R_SS].base + esp;
650 if (shift) {
651 cpu_stl_kernel(env, ssp, error_code);
652 } else {
653 cpu_stw_kernel(env, ssp, error_code);
655 SET_ESP(esp, mask);
657 return;
658 case 6: /* 286 interrupt gate */
659 case 7: /* 286 trap gate */
660 case 14: /* 386 interrupt gate */
661 case 15: /* 386 trap gate */
662 break;
663 default:
664 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
665 break;
667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
668 cpl = env->hflags & HF_CPL_MASK;
669 /* check privilege if software int */
670 if (is_int && dpl < cpl) {
671 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
673 /* check valid bit */
674 if (!(e2 & DESC_P_MASK)) {
675 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
677 selector = e1 >> 16;
678 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
679 if ((selector & 0xfffc) == 0) {
680 raise_exception_err(env, EXCP0D_GPF, 0);
682 if (load_segment(env, &e1, &e2, selector) != 0) {
683 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
685 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
689 if (dpl > cpl) {
690 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
692 if (!(e2 & DESC_P_MASK)) {
693 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
695 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
696 /* to inner privilege */
697 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
698 if ((ss & 0xfffc) == 0) {
699 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
701 if ((ss & 3) != dpl) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
708 if (ss_dpl != dpl) {
709 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
711 if (!(ss_e2 & DESC_S_MASK) ||
712 (ss_e2 & DESC_CS_MASK) ||
713 !(ss_e2 & DESC_W_MASK)) {
714 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
716 if (!(ss_e2 & DESC_P_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 new_stack = 1;
720 sp_mask = get_sp_mask(ss_e2);
721 ssp = get_seg_base(ss_e1, ss_e2);
722 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
723 /* to same privilege */
724 if (vm86) {
725 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
727 new_stack = 0;
728 sp_mask = get_sp_mask(env->segs[R_SS].flags);
729 ssp = env->segs[R_SS].base;
730 esp = env->regs[R_ESP];
731 dpl = cpl;
732 } else {
733 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
734 new_stack = 0; /* avoid warning */
735 sp_mask = 0; /* avoid warning */
736 ssp = 0; /* avoid warning */
737 esp = 0; /* avoid warning */
740 shift = type >> 3;
742 #if 0
743 /* XXX: check that enough room is available */
744 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
745 if (vm86) {
746 push_size += 8;
748 push_size <<= shift;
749 #endif
750 if (shift == 1) {
751 if (new_stack) {
752 if (vm86) {
753 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
756 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
759 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
761 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
762 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
763 PUSHL(ssp, esp, sp_mask, old_eip);
764 if (has_error_code) {
765 PUSHL(ssp, esp, sp_mask, error_code);
767 } else {
768 if (new_stack) {
769 if (vm86) {
770 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
773 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
776 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
778 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
779 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
780 PUSHW(ssp, esp, sp_mask, old_eip);
781 if (has_error_code) {
782 PUSHW(ssp, esp, sp_mask, error_code);
786 /* an interrupt gate clears the IF mask (a trap gate does not) */
787 if ((type & 1) == 0) {
788 env->eflags &= ~IF_MASK;
790 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
792 if (new_stack) {
793 if (vm86) {
794 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
797 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
799 ss = (ss & ~3) | dpl;
800 cpu_x86_load_seg_cache(env, R_SS, ss,
801 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
803 SET_ESP(esp, sp_mask);
805 selector = (selector & ~3) | dpl;
806 cpu_x86_load_seg_cache(env, R_CS, selector,
807 get_seg_base(e1, e2),
808 get_seg_limit(e1, e2),
809 e2);
810 env->eip = offset;
813 #ifdef TARGET_X86_64
815 #define PUSHQ_RA(sp, val, ra) \
817 sp -= 8; \
818 cpu_stq_kernel_ra(env, sp, (val), ra); \
821 #define POPQ_RA(sp, val, ra) \
823 val = cpu_ldq_kernel_ra(env, sp, ra); \
824 sp += 8; \
827 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
828 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
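/* Read a 64-bit stack pointer from the 64-bit TSS: levels 0-2 select
   RSP0-RSP2, levels 4-10 select IST1-IST7 (callers pass ist + 3). */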
830 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
832 X86CPU *cpu = x86_env_get_cpu(env);
833 int index;
835 #if 0
836 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
837 env->tr.base, env->tr.limit);
838 #endif
840 if (!(env->tr.flags & DESC_P_MASK)) {
841 cpu_abort(CPU(cpu), "invalid tss");
843 index = 8 * level + 4;
844 if ((index + 7) > env->tr.limit) {
845 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
847 return cpu_ldq_kernel(env, env->tr.base + index);
850 /* 64 bit interrupt */
851 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
852 int error_code, target_ulong next_eip, int is_hw)
854 SegmentCache *dt;
855 target_ulong ptr;
856 int type, dpl, selector, cpl, ist;
857 int has_error_code, new_stack;
858 uint32_t e1, e2, e3, ss;
859 target_ulong old_eip, esp, offset;
861 has_error_code = 0;
862 if (!is_int && !is_hw) {
863 has_error_code = exception_has_error_code(intno);
865 if (is_int) {
866 old_eip = next_eip;
867 } else {
868 old_eip = env->eip;
871 dt = &env->idt;
872 if (intno * 16 + 15 > dt->limit) {
873 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
875 ptr = dt->base + intno * 16;
876 e1 = cpu_ldl_kernel(env, ptr);
877 e2 = cpu_ldl_kernel(env, ptr + 4);
878 e3 = cpu_ldl_kernel(env, ptr + 8);
879 /* check gate type */
880 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
881 switch (type) {
882 case 14: /* 386 interrupt gate */
883 case 15: /* 386 trap gate */
884 break;
885 default:
886 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
887 break;
889 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
890 cpl = env->hflags & HF_CPL_MASK;
891 /* check privilege if software int */
892 if (is_int && dpl < cpl) {
893 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
895 /* check valid bit */
896 if (!(e2 & DESC_P_MASK)) {
897 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
899 selector = e1 >> 16;
900 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 ist = e2 & 7;
902 if ((selector & 0xfffc) == 0) {
903 raise_exception_err(env, EXCP0D_GPF, 0);
906 if (load_segment(env, &e1, &e2, selector) != 0) {
907 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
913 if (dpl > cpl) {
914 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
916 if (!(e2 & DESC_P_MASK)) {
917 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
919 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
920 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
922 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
923 /* to inner privilege */
924 new_stack = 1;
925 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
926 ss = 0;
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK) {
930 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
932 new_stack = 0;
933 esp = env->regs[R_ESP];
934 dpl = cpl;
935 } else {
936 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
937 new_stack = 0; /* avoid warning */
938 esp = 0; /* avoid warning */
940 esp &= ~0xfLL; /* align stack */
942 PUSHQ(esp, env->segs[R_SS].selector);
943 PUSHQ(esp, env->regs[R_ESP]);
944 PUSHQ(esp, cpu_compute_eflags(env));
945 PUSHQ(esp, env->segs[R_CS].selector);
946 PUSHQ(esp, old_eip);
947 if (has_error_code) {
948 PUSHQ(esp, error_code);
951 /* an interrupt gate clears the IF mask (a trap gate does not) */
952 if ((type & 1) == 0) {
953 env->eflags &= ~IF_MASK;
955 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
957 if (new_stack) {
958 ss = 0 | dpl;
959 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
961 env->regs[R_ESP] = esp;
963 selector = (selector & ~3) | dpl;
964 cpu_x86_load_seg_cache(env, R_CS, selector,
965 get_seg_base(e1, e2),
966 get_seg_limit(e1, e2),
967 e2);
968 env->eip = offset;
970 #endif
972 #ifdef TARGET_X86_64
973 #if defined(CONFIG_USER_ONLY)
974 void helper_syscall(CPUX86State *env, int next_eip_addend)
976 CPUState *cs = CPU(x86_env_get_cpu(env));
978 cs->exception_index = EXCP_SYSCALL;
979 env->exception_next_eip = env->eip + next_eip_addend;
980 cpu_loop_exit(cs);
982 #else
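/* SYSCALL: in long mode, CS/SS come from MSR_STAR[47:32], RIP from LSTAR
   (64-bit code) or CSTAR (compatibility mode) and RFLAGS is masked with
   SFMASK; in legacy mode, RIP comes from STAR[31:0]. */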
983 void helper_syscall(CPUX86State *env, int next_eip_addend)
985 int selector;
987 if (!(env->efer & MSR_EFER_SCE)) {
988 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
990 selector = (env->star >> 32) & 0xffff;
991 if (env->hflags & HF_LMA_MASK) {
992 int code64;
994 env->regs[R_ECX] = env->eip + next_eip_addend;
995 env->regs[11] = cpu_compute_eflags(env);
997 code64 = env->hflags & HF_CS64_MASK;
999 env->eflags &= ~env->fmask;
1000 cpu_load_eflags(env, env->eflags, 0);
1001 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1002 0, 0xffffffff,
1003 DESC_G_MASK | DESC_P_MASK |
1004 DESC_S_MASK |
1005 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1006 DESC_L_MASK);
1007 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1008 0, 0xffffffff,
1009 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1010 DESC_S_MASK |
1011 DESC_W_MASK | DESC_A_MASK);
1012 if (code64) {
1013 env->eip = env->lstar;
1014 } else {
1015 env->eip = env->cstar;
1017 } else {
1018 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1020 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1021 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1022 0, 0xffffffff,
1023 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1024 DESC_S_MASK |
1025 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1026 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1027 0, 0xffffffff,
1028 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1029 DESC_S_MASK |
1030 DESC_W_MASK | DESC_A_MASK);
1031 env->eip = (uint32_t)env->star;
1034 #endif
1035 #endif
1037 #ifdef TARGET_X86_64
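/* SYSRET: return to user mode; CS/SS are derived from MSR_STAR[63:48]
   (with +16 for a 64-bit return when dflag == 2) and RIP is taken from RCX. */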
1038 void helper_sysret(CPUX86State *env, int dflag)
1040 int cpl, selector;
1042 if (!(env->efer & MSR_EFER_SCE)) {
1043 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1045 cpl = env->hflags & HF_CPL_MASK;
1046 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1047 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1049 selector = (env->star >> 48) & 0xffff;
1050 if (env->hflags & HF_LMA_MASK) {
1051 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1052 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1053 NT_MASK);
1054 if (dflag == 2) {
1055 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1056 0, 0xffffffff,
1057 DESC_G_MASK | DESC_P_MASK |
1058 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1059 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1060 DESC_L_MASK);
1061 env->eip = env->regs[R_ECX];
1062 } else {
1063 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1064 0, 0xffffffff,
1065 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1066 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1067 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1068 env->eip = (uint32_t)env->regs[R_ECX];
1070 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1071 0, 0xffffffff,
1072 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074 DESC_W_MASK | DESC_A_MASK);
1075 } else {
1076 env->eflags |= IF_MASK;
1077 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1078 0, 0xffffffff,
1079 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1082 env->eip = (uint32_t)env->regs[R_ECX];
1083 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1084 0, 0xffffffff,
1085 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1086 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1087 DESC_W_MASK | DESC_A_MASK);
1090 #endif
1092 /* real mode interrupt */
1093 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1094 int error_code, unsigned int next_eip)
1096 SegmentCache *dt;
1097 target_ulong ptr, ssp;
1098 int selector;
1099 uint32_t offset, esp;
1100 uint32_t old_cs, old_eip;
1102 /* real mode (simpler!) */
1103 dt = &env->idt;
1104 if (intno * 4 + 3 > dt->limit) {
1105 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1107 ptr = dt->base + intno * 4;
1108 offset = cpu_lduw_kernel(env, ptr);
1109 selector = cpu_lduw_kernel(env, ptr + 2);
1110 esp = env->regs[R_ESP];
1111 ssp = env->segs[R_SS].base;
1112 if (is_int) {
1113 old_eip = next_eip;
1114 } else {
1115 old_eip = env->eip;
1117 old_cs = env->segs[R_CS].selector;
1118 /* XXX: use SS segment size? */
1119 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1120 PUSHW(ssp, esp, 0xffff, old_cs);
1121 PUSHW(ssp, esp, 0xffff, old_eip);
1123 /* update processor state */
1124 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1125 env->eip = offset;
1126 env->segs[R_CS].selector = selector;
1127 env->segs[R_CS].base = (selector << 4);
1128 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1131 #if defined(CONFIG_USER_ONLY)
1132 /* fake user mode interrupt */
1133 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1134 int error_code, target_ulong next_eip)
1136 SegmentCache *dt;
1137 target_ulong ptr;
1138 int dpl, cpl, shift;
1139 uint32_t e2;
1141 dt = &env->idt;
1142 if (env->hflags & HF_LMA_MASK) {
1143 shift = 4;
1144 } else {
1145 shift = 3;
1147 ptr = dt->base + (intno << shift);
1148 e2 = cpu_ldl_kernel(env, ptr + 4);
1150 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1151 cpl = env->hflags & HF_CPL_MASK;
1152 /* check privilege if software int */
1153 if (is_int && dpl < cpl) {
1154 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1157 /* Since we emulate only user space, we cannot do more than
1158 exit the emulation with the appropriate exception and error
1159 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1160 if (is_int || intno == EXCP_SYSCALL) {
1161 env->eip = next_eip;
1165 #else
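/* When running inside an SVM guest, record the event being delivered in
   the VMCB EVENTINJ field (unless an injection is already marked valid),
   including the error code for exceptions that have one. */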
1167 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1168 int error_code, int is_hw, int rm)
1170 CPUState *cs = CPU(x86_env_get_cpu(env));
1171 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1172 control.event_inj));
1174 if (!(event_inj & SVM_EVTINJ_VALID)) {
1175 int type;
1177 if (is_int) {
1178 type = SVM_EVTINJ_TYPE_SOFT;
1179 } else {
1180 type = SVM_EVTINJ_TYPE_EXEPT;
1182 event_inj = intno | type | SVM_EVTINJ_VALID;
1183 if (!rm && exception_has_error_code(intno)) {
1184 event_inj |= SVM_EVTINJ_VALID_ERR;
1185 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1186 control.event_inj_err),
1187 error_code);
1189 x86_stl_phys(cs,
1190 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1191 event_inj);
1194 #endif
1197 * Begin execution of an interrupt. is_int is TRUE if coming from
1198 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1199 * instruction. It is only relevant if is_int is TRUE.
1201 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1202 int error_code, target_ulong next_eip, int is_hw)
1204 CPUX86State *env = &cpu->env;
1206 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1207 if ((env->cr[0] & CR0_PE_MASK)) {
1208 static int count;
1210 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1211 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1212 count, intno, error_code, is_int,
1213 env->hflags & HF_CPL_MASK,
1214 env->segs[R_CS].selector, env->eip,
1215 (int)env->segs[R_CS].base + env->eip,
1216 env->segs[R_SS].selector, env->regs[R_ESP]);
1217 if (intno == 0x0e) {
1218 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1219 } else {
1220 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1222 qemu_log("\n");
1223 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1224 #if 0
1226 int i;
1227 target_ulong ptr;
1229 qemu_log(" code=");
1230 ptr = env->segs[R_CS].base + env->eip;
1231 for (i = 0; i < 16; i++) {
1232 qemu_log(" %02x", ldub(ptr + i));
1234 qemu_log("\n");
1236 #endif
1237 count++;
1240 if (env->cr[0] & CR0_PE_MASK) {
1241 #if !defined(CONFIG_USER_ONLY)
1242 if (env->hflags & HF_SVMI_MASK) {
1243 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1245 #endif
1246 #ifdef TARGET_X86_64
1247 if (env->hflags & HF_LMA_MASK) {
1248 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1249 } else
1250 #endif
1252 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1253 is_hw);
1255 } else {
1256 #if !defined(CONFIG_USER_ONLY)
1257 if (env->hflags & HF_SVMI_MASK) {
1258 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1260 #endif
1261 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1264 #if !defined(CONFIG_USER_ONLY)
1265 if (env->hflags & HF_SVMI_MASK) {
1266 CPUState *cs = CPU(cpu);
1267 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1268 offsetof(struct vmcb,
1269 control.event_inj));
1271 x86_stl_phys(cs,
1272 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1273 event_inj & ~SVM_EVTINJ_VALID);
1275 #endif
1278 void x86_cpu_do_interrupt(CPUState *cs)
1280 X86CPU *cpu = X86_CPU(cs);
1281 CPUX86State *env = &cpu->env;
1283 #if defined(CONFIG_USER_ONLY)
1284 /* if user mode only, we simulate a fake exception
1285 which will be handled outside the cpu execution
1286 loop */
1287 do_interrupt_user(env, cs->exception_index,
1288 env->exception_is_int,
1289 env->error_code,
1290 env->exception_next_eip);
1291 /* successfully delivered */
1292 env->old_exception = -1;
1293 #else
1294 /* simulate a real cpu exception. On i386, it can
1295 trigger new exceptions, but we do not handle
1296 double or triple faults yet. */
1297 do_interrupt_all(cpu, cs->exception_index,
1298 env->exception_is_int,
1299 env->error_code,
1300 env->exception_next_eip, 0);
1301 /* successfully delivered */
1302 env->old_exception = -1;
1303 #endif
1306 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1308 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
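/* Called from the main execution loop to handle a pending interrupt
   request: SIPI, SMI, NMI, MCE, external and virtual interrupts are
   serviced in that order when GIF allows it; external and virtual
   interrupts additionally respect IF and the interrupt-inhibit shadow. */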
1311 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1313 X86CPU *cpu = X86_CPU(cs);
1314 CPUX86State *env = &cpu->env;
1315 bool ret = false;
1317 #if !defined(CONFIG_USER_ONLY)
1318 if (interrupt_request & CPU_INTERRUPT_POLL) {
1319 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1320 apic_poll_irq(cpu->apic_state);
1321 /* Don't process multiple interrupt requests in a single call.
1322 This is required to make icount-driven execution deterministic. */
1323 return true;
1325 #endif
1326 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1327 do_cpu_sipi(cpu);
1328 } else if (env->hflags2 & HF2_GIF_MASK) {
1329 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1330 !(env->hflags & HF_SMM_MASK)) {
1331 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1332 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1333 do_smm_enter(cpu);
1334 ret = true;
1335 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1336 !(env->hflags2 & HF2_NMI_MASK)) {
1337 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1338 env->hflags2 |= HF2_NMI_MASK;
1339 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1340 ret = true;
1341 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1342 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1343 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1344 ret = true;
1345 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1346 (((env->hflags2 & HF2_VINTR_MASK) &&
1347 (env->hflags2 & HF2_HIF_MASK)) ||
1348 (!(env->hflags2 & HF2_VINTR_MASK) &&
1349 (env->eflags & IF_MASK &&
1350 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1351 int intno;
1352 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1353 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1354 CPU_INTERRUPT_VIRQ);
1355 intno = cpu_get_pic_interrupt(env);
1356 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1357 "Servicing hardware INT=0x%02x\n", intno);
1358 do_interrupt_x86_hardirq(env, intno, 1);
1359 /* ensure that no TB jump will be modified as
1360 the program flow was changed */
1361 ret = true;
1362 #if !defined(CONFIG_USER_ONLY)
1363 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1364 (env->eflags & IF_MASK) &&
1365 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1366 int intno;
1367 /* FIXME: this should respect TPR */
1368 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1369 intno = x86_ldl_phys(cs, env->vm_vmcb
1370 + offsetof(struct vmcb, control.int_vector));
1371 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1372 "Servicing virtual hardware INT=0x%02x\n", intno);
1373 do_interrupt_x86_hardirq(env, intno, 1);
1374 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1375 ret = true;
1376 #endif
1380 return ret;
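/* LLDT: load the LDT register from a GDT descriptor (16 bytes wide in
   long mode); a null selector leaves the LDT unusable. */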
1383 void helper_lldt(CPUX86State *env, int selector)
1385 SegmentCache *dt;
1386 uint32_t e1, e2;
1387 int index, entry_limit;
1388 target_ulong ptr;
1390 selector &= 0xffff;
1391 if ((selector & 0xfffc) == 0) {
1392 /* XXX: NULL selector case: invalid LDT */
1393 env->ldt.base = 0;
1394 env->ldt.limit = 0;
1395 } else {
1396 if (selector & 0x4) {
1397 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1399 dt = &env->gdt;
1400 index = selector & ~7;
1401 #ifdef TARGET_X86_64
1402 if (env->hflags & HF_LMA_MASK) {
1403 entry_limit = 15;
1404 } else
1405 #endif
1407 entry_limit = 7;
1409 if ((index + entry_limit) > dt->limit) {
1410 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1412 ptr = dt->base + index;
1413 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1414 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1415 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1416 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1418 if (!(e2 & DESC_P_MASK)) {
1419 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1421 #ifdef TARGET_X86_64
1422 if (env->hflags & HF_LMA_MASK) {
1423 uint32_t e3;
1425 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1426 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1427 env->ldt.base |= (target_ulong)e3 << 32;
1428 } else
1429 #endif
1431 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1434 env->ldt.selector = selector;
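/* LTR: load the task register from an available TSS descriptor in the GDT
   and mark the descriptor busy. */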
1437 void helper_ltr(CPUX86State *env, int selector)
1439 SegmentCache *dt;
1440 uint32_t e1, e2;
1441 int index, type, entry_limit;
1442 target_ulong ptr;
1444 selector &= 0xffff;
1445 if ((selector & 0xfffc) == 0) {
1446 /* NULL selector case: invalid TR */
1447 env->tr.base = 0;
1448 env->tr.limit = 0;
1449 env->tr.flags = 0;
1450 } else {
1451 if (selector & 0x4) {
1452 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1454 dt = &env->gdt;
1455 index = selector & ~7;
1456 #ifdef TARGET_X86_64
1457 if (env->hflags & HF_LMA_MASK) {
1458 entry_limit = 15;
1459 } else
1460 #endif
1462 entry_limit = 7;
1464 if ((index + entry_limit) > dt->limit) {
1465 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1467 ptr = dt->base + index;
1468 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1469 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1470 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1471 if ((e2 & DESC_S_MASK) ||
1472 (type != 1 && type != 9)) {
1473 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1475 if (!(e2 & DESC_P_MASK)) {
1476 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1478 #ifdef TARGET_X86_64
1479 if (env->hflags & HF_LMA_MASK) {
1480 uint32_t e3, e4;
1482 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1483 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1484 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1485 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1487 load_seg_cache_raw_dt(&env->tr, e1, e2);
1488 env->tr.base |= (target_ulong)e3 << 32;
1489 } else
1490 #endif
1492 load_seg_cache_raw_dt(&env->tr, e1, e2);
1494 e2 |= DESC_TSS_BUSY_MASK;
1495 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1497 env->tr.selector = selector;
1500 /* only works in protected mode, outside VM86; seg_reg must be != R_CS */
1501 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1503 uint32_t e1, e2;
1504 int cpl, dpl, rpl;
1505 SegmentCache *dt;
1506 int index;
1507 target_ulong ptr;
1509 selector &= 0xffff;
1510 cpl = env->hflags & HF_CPL_MASK;
1511 if ((selector & 0xfffc) == 0) {
1512 /* null selector case */
1513 if (seg_reg == R_SS
1514 #ifdef TARGET_X86_64
1515 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1516 #endif
1518 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1520 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1521 } else {
1523 if (selector & 0x4) {
1524 dt = &env->ldt;
1525 } else {
1526 dt = &env->gdt;
1528 index = selector & ~7;
1529 if ((index + 7) > dt->limit) {
1530 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1532 ptr = dt->base + index;
1533 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1534 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1536 if (!(e2 & DESC_S_MASK)) {
1537 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1539 rpl = selector & 3;
1540 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1541 if (seg_reg == R_SS) {
1542 /* must be writable segment */
1543 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1544 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1546 if (rpl != cpl || dpl != cpl) {
1547 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1549 } else {
1550 /* must be readable segment */
1551 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1552 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1555 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1556 /* if not conforming code, test rights */
1557 if (dpl < cpl || dpl < rpl) {
1558 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1563 if (!(e2 & DESC_P_MASK)) {
1564 if (seg_reg == R_SS) {
1565 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1566 } else {
1567 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1571 /* set the access bit if not already set */
1572 if (!(e2 & DESC_A_MASK)) {
1573 e2 |= DESC_A_MASK;
1574 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1577 cpu_x86_load_seg_cache(env, seg_reg, selector,
1578 get_seg_base(e1, e2),
1579 get_seg_limit(e1, e2),
1580 e2);
1581 #if 0
1582 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1583 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1584 #endif
1588 /* protected mode jump */
1589 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1590 target_ulong next_eip)
1592 int gate_cs, type;
1593 uint32_t e1, e2, cpl, dpl, rpl, limit;
1595 if ((new_cs & 0xfffc) == 0) {
1596 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1598 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1599 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1601 cpl = env->hflags & HF_CPL_MASK;
1602 if (e2 & DESC_S_MASK) {
1603 if (!(e2 & DESC_CS_MASK)) {
1604 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1606 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1607 if (e2 & DESC_C_MASK) {
1608 /* conforming code segment */
1609 if (dpl > cpl) {
1610 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1612 } else {
1613 /* non conforming code segment */
1614 rpl = new_cs & 3;
1615 if (rpl > cpl) {
1616 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1618 if (dpl != cpl) {
1619 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1622 if (!(e2 & DESC_P_MASK)) {
1623 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1625 limit = get_seg_limit(e1, e2);
1626 if (new_eip > limit &&
1627 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1628 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1630 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1631 get_seg_base(e1, e2), limit, e2);
1632 env->eip = new_eip;
1633 } else {
1634 /* jump to call or task gate */
1635 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1636 rpl = new_cs & 3;
1637 cpl = env->hflags & HF_CPL_MASK;
1638 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1639 switch (type) {
1640 case 1: /* 286 TSS */
1641 case 9: /* 386 TSS */
1642 case 5: /* task gate */
1643 if (dpl < cpl || dpl < rpl) {
1644 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1646 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1647 break;
1648 case 4: /* 286 call gate */
1649 case 12: /* 386 call gate */
1650 if ((dpl < cpl) || (dpl < rpl)) {
1651 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1653 if (!(e2 & DESC_P_MASK)) {
1654 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1656 gate_cs = e1 >> 16;
1657 new_eip = (e1 & 0xffff);
1658 if (type == 12) {
1659 new_eip |= (e2 & 0xffff0000);
1661 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1662 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1664 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1665 /* must be code segment */
1666 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1667 (DESC_S_MASK | DESC_CS_MASK))) {
1668 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1670 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1671 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1672 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1674 if (!(e2 & DESC_P_MASK)) {
1675 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1677 limit = get_seg_limit(e1, e2);
1678 if (new_eip > limit) {
1679 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1681 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1682 get_seg_base(e1, e2), limit, e2);
1683 env->eip = new_eip;
1684 break;
1685 default:
1686 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1687 break;
1692 /* real mode call */
1693 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1694 int shift, int next_eip)
1696 int new_eip;
1697 uint32_t esp, esp_mask;
1698 target_ulong ssp;
1700 new_eip = new_eip1;
1701 esp = env->regs[R_ESP];
1702 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1703 ssp = env->segs[R_SS].base;
1704 if (shift) {
1705 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1706 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1707 } else {
1708 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1709 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1712 SET_ESP(esp, esp_mask);
1713 env->eip = new_eip;
1714 env->segs[R_CS].selector = new_cs;
1715 env->segs[R_CS].base = (new_cs << 4);
1718 /* protected mode call */
1719 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1720 int shift, target_ulong next_eip)
1722 int new_stack, i;
1723 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1724 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1725 uint32_t val, limit, old_sp_mask;
1726 target_ulong ssp, old_ssp;
1728 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1729 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1730 if ((new_cs & 0xfffc) == 0) {
1731 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1733 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1734 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1736 cpl = env->hflags & HF_CPL_MASK;
1737 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1738 if (e2 & DESC_S_MASK) {
1739 if (!(e2 & DESC_CS_MASK)) {
1740 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1742 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1743 if (e2 & DESC_C_MASK) {
1744 /* conforming code segment */
1745 if (dpl > cpl) {
1746 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1748 } else {
1749 /* non conforming code segment */
1750 rpl = new_cs & 3;
1751 if (rpl > cpl) {
1752 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1754 if (dpl != cpl) {
1755 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1758 if (!(e2 & DESC_P_MASK)) {
1759 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1762 #ifdef TARGET_X86_64
1763 /* XXX: check 16/32 bit cases in long mode */
1764 if (shift == 2) {
1765 target_ulong rsp;
1767 /* 64 bit case */
1768 rsp = env->regs[R_ESP];
1769 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1770 PUSHQ_RA(rsp, next_eip, GETPC());
1771 /* from this point, not restartable */
1772 env->regs[R_ESP] = rsp;
1773 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1774 get_seg_base(e1, e2),
1775 get_seg_limit(e1, e2), e2);
1776 env->eip = new_eip;
1777 } else
1778 #endif
1780 sp = env->regs[R_ESP];
1781 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1782 ssp = env->segs[R_SS].base;
1783 if (shift) {
1784 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1785 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1786 } else {
1787 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1788 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1791 limit = get_seg_limit(e1, e2);
1792 if (new_eip > limit) {
1793 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1795 /* from this point, not restartable */
1796 SET_ESP(sp, sp_mask);
1797 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1798 get_seg_base(e1, e2), limit, e2);
1799 env->eip = new_eip;
1801 } else {
1802 /* check gate type */
1803 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1804 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1805 rpl = new_cs & 3;
1806 switch (type) {
1807 case 1: /* available 286 TSS */
1808 case 9: /* available 386 TSS */
1809 case 5: /* task gate */
1810 if (dpl < cpl || dpl < rpl) {
1811 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1813 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1814 return;
1815 case 4: /* 286 call gate */
1816 case 12: /* 386 call gate */
1817 break;
1818 default:
1819 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1820 break;
1822 shift = type >> 3;
1824 if (dpl < cpl || dpl < rpl) {
1825 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1827 /* check valid bit */
1828 if (!(e2 & DESC_P_MASK)) {
1829 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1831 selector = e1 >> 16;
1832 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1833 param_count = e2 & 0x1f;
1834 if ((selector & 0xfffc) == 0) {
1835 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1838 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1839 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1841 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1842 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1844 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1845 if (dpl > cpl) {
1846 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1848 if (!(e2 & DESC_P_MASK)) {
1849 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1852 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1853 /* to inner privilege */
1854 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1855 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1856 TARGET_FMT_lx "\n", ss, sp, param_count,
1857 env->regs[R_ESP]);
1858 if ((ss & 0xfffc) == 0) {
1859 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1861 if ((ss & 3) != dpl) {
1862 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1864 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1865 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1867 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1868 if (ss_dpl != dpl) {
1869 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1871 if (!(ss_e2 & DESC_S_MASK) ||
1872 (ss_e2 & DESC_CS_MASK) ||
1873 !(ss_e2 & DESC_W_MASK)) {
1874 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1876 if (!(ss_e2 & DESC_P_MASK)) {
1877 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1880 /* push_size = ((param_count * 2) + 8) << shift; */
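/* Call through a call gate to a more privileged code segment (dpl < cpl):
   the new SS:ESP was fetched from the TSS above, the caller's SS:ESP is
   pushed on the new stack, then param_count words (or dwords for a 386
   gate) are copied from the old stack before the common code below
   pushes the caller's CS and return EIP. */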
1882 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1883 old_ssp = env->segs[R_SS].base;
1885 sp_mask = get_sp_mask(ss_e2);
1886 ssp = get_seg_base(ss_e1, ss_e2);
1887 if (shift) {
1888 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1889 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1890 for (i = param_count - 1; i >= 0; i--) {
1891 val = cpu_ldl_kernel_ra(env, old_ssp +
1892 ((env->regs[R_ESP] + i * 4) &
1893 old_sp_mask), GETPC());
1894 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1896 } else {
1897 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1898 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1899 for (i = param_count - 1; i >= 0; i--) {
1900 val = cpu_lduw_kernel_ra(env, old_ssp +
1901 ((env->regs[R_ESP] + i * 2) &
1902 old_sp_mask), GETPC());
1903 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1906 new_stack = 1;
1907 } else {
1908 /* to same privilege */
1909 sp = env->regs[R_ESP];
1910 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1911 ssp = env->segs[R_SS].base;
1912 /* push_size = (4 << shift); */
1913 new_stack = 0;
1916 if (shift) {
1917 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1918 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1919 } else {
1920 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1921 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1924 /* from this point, not restartable */
1926 if (new_stack) {
1927 ss = (ss & ~3) | dpl;
1928 cpu_x86_load_seg_cache(env, R_SS, ss,
1929 ssp,
1930 get_seg_limit(ss_e1, ss_e2),
1931 ss_e2);
1934 selector = (selector & ~3) | dpl;
1935 cpu_x86_load_seg_cache(env, R_CS, selector,
1936 get_seg_base(e1, e2),
1937 get_seg_limit(e1, e2),
1938 e2);
1939 SET_ESP(sp, sp_mask);
1940 env->eip = offset;
1941 }
1942 }
1944 /* real and vm86 mode iret */
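/* In real mode IRET simply pops IP, CS and FLAGS; in vm86 mode the same
   frame is used but IOPL is not writable, which is why the eflags mask
   below differs between the two cases. */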
1945 void helper_iret_real(CPUX86State *env, int shift)
1946 {
1947 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1948 target_ulong ssp;
1949 int eflags_mask;
1951 sp_mask = 0xffff; /* XXXX: use SS segment size? */
1952 sp = env->regs[R_ESP];
1953 ssp = env->segs[R_SS].base;
1954 if (shift == 1) {
1955 /* 32 bits */
1956 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1957 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1958 new_cs &= 0xffff;
1959 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1960 } else {
1961 /* 16 bits */
1962 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1963 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1964 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1966 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1967 env->segs[R_CS].selector = new_cs;
1968 env->segs[R_CS].base = (new_cs << 4);
1969 env->eip = new_eip;
1970 if (env->eflags & VM_MASK) {
1971 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1972 NT_MASK;
1973 } else {
1974 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1975 RF_MASK | NT_MASK;
1977 if (shift == 0) {
1978 eflags_mask &= 0xffff;
1980 cpu_load_eflags(env, new_eflags, eflags_mask);
1981 env->hflags2 &= ~HF2_NMI_MASK;
1982 }
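/* On a return to an outer privilege level, data segment registers whose
   DPL is below the new CPL must be nulled so stale descriptors cannot be
   used; conforming code segments are exempt. */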
1984 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1985 {
1986 int dpl;
1987 uint32_t e2;
1989 /* XXX: on x86_64, we do not want to nullify FS and GS because
1990 they may still contain a valid base. I would be interested to
1991 know how a real x86_64 CPU behaves */
1992 if ((seg_reg == R_FS || seg_reg == R_GS) &&
1993 (env->segs[seg_reg].selector & 0xfffc) == 0) {
1994 return;
1997 e2 = env->segs[seg_reg].flags;
1998 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1999 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2000 /* data or non conforming code segment */
2001 if (dpl < cpl) {
2002 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2003 }
2004 }
2005 }
2007 /* protected mode iret */
2008 static inline void helper_ret_protected(CPUX86State *env, int shift,
2009 int is_iret, int addend,
2010 uintptr_t retaddr)
2011 {
2012 uint32_t new_cs, new_eflags, new_ss;
2013 uint32_t new_es, new_ds, new_fs, new_gs;
2014 uint32_t e1, e2, ss_e1, ss_e2;
2015 int cpl, dpl, rpl, eflags_mask, iopl;
2016 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2018 #ifdef TARGET_X86_64
2019 if (shift == 2) {
2020 sp_mask = -1;
2021 } else
2022 #endif
2023 {
2024 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2025 }
2026 sp = env->regs[R_ESP];
2027 ssp = env->segs[R_SS].base;
2028 new_eflags = 0; /* avoid warning */
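/* Pop the return frame.  For far RET it is EIP then CS; IRET additionally
   pops EFLAGS.  The width of each slot follows 'shift' (0 = 16-bit,
   1 = 32-bit, 2 = 64-bit), and an IRET that restores VM_MASK from a
   32-bit frame branches to the vm86 return path below. */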
2029 #ifdef TARGET_X86_64
2030 if (shift == 2) {
2031 POPQ_RA(sp, new_eip, retaddr);
2032 POPQ_RA(sp, new_cs, retaddr);
2033 new_cs &= 0xffff;
2034 if (is_iret) {
2035 POPQ_RA(sp, new_eflags, retaddr);
2037 } else
2038 #endif
2040 if (shift == 1) {
2041 /* 32 bits */
2042 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2043 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2044 new_cs &= 0xffff;
2045 if (is_iret) {
2046 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2047 if (new_eflags & VM_MASK) {
2048 goto return_to_vm86;
2051 } else {
2052 /* 16 bits */
2053 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2054 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2055 if (is_iret) {
2056 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2060 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2061 new_cs, new_eip, shift, addend);
2062 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2063 if ((new_cs & 0xfffc) == 0) {
2064 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2066 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2067 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2069 if (!(e2 & DESC_S_MASK) ||
2070 !(e2 & DESC_CS_MASK)) {
2071 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2073 cpl = env->hflags & HF_CPL_MASK;
2074 rpl = new_cs & 3;
2075 if (rpl < cpl) {
2076 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2078 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2079 if (e2 & DESC_C_MASK) {
2080 if (dpl > rpl) {
2081 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2083 } else {
2084 if (dpl != rpl) {
2085 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2088 if (!(e2 & DESC_P_MASK)) {
2089 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2092 sp += addend;
2093 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2094 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2095 /* return to same privilege level */
2096 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2097 get_seg_base(e1, e2),
2098 get_seg_limit(e1, e2),
2099 e2);
2100 } else {
2101 /* return to different privilege level */
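/* The return crosses to a less privileged level, so the frame also holds
   the outer stack: pop ESP then SS with the same operand size as above,
   and validate SS before switching stacks. */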
2102 #ifdef TARGET_X86_64
2103 if (shift == 2) {
2104 POPQ_RA(sp, new_esp, retaddr);
2105 POPQ_RA(sp, new_ss, retaddr);
2106 new_ss &= 0xffff;
2107 } else
2108 #endif
2110 if (shift == 1) {
2111 /* 32 bits */
2112 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2113 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2114 new_ss &= 0xffff;
2115 } else {
2116 /* 16 bits */
2117 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2118 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2121 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2122 new_ss, new_esp);
2123 if ((new_ss & 0xfffc) == 0) {
2124 #ifdef TARGET_X86_64
2125 /* NULL ss is allowed in long mode if cpl != 3 */
2126 /* XXX: test CS64? */
2127 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2128 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2129 0, 0xffffffff,
2130 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2131 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2132 DESC_W_MASK | DESC_A_MASK);
2133 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2134 } else
2135 #endif
2136 {
2137 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2138 }
2139 } else {
2140 if ((new_ss & 3) != rpl) {
2141 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2143 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2144 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2146 if (!(ss_e2 & DESC_S_MASK) ||
2147 (ss_e2 & DESC_CS_MASK) ||
2148 !(ss_e2 & DESC_W_MASK)) {
2149 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2151 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2152 if (dpl != rpl) {
2153 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2155 if (!(ss_e2 & DESC_P_MASK)) {
2156 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2158 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2159 get_seg_base(ss_e1, ss_e2),
2160 get_seg_limit(ss_e1, ss_e2),
2161 ss_e2);
2164 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2165 get_seg_base(e1, e2),
2166 get_seg_limit(e1, e2),
2167 e2);
2168 sp = new_esp;
2169 #ifdef TARGET_X86_64
2170 if (env->hflags & HF_CS64_MASK) {
2171 sp_mask = -1;
2172 } else
2173 #endif
2174 {
2175 sp_mask = get_sp_mask(ss_e2);
2176 }
2178 /* validate data segments */
2179 validate_seg(env, R_ES, rpl);
2180 validate_seg(env, R_DS, rpl);
2181 validate_seg(env, R_FS, rpl);
2182 validate_seg(env, R_GS, rpl);
2184 sp += addend;
2186 SET_ESP(sp, sp_mask);
2187 env->eip = new_eip;
2188 if (is_iret) {
2189 /* NOTE: 'cpl' is the _old_ CPL */
2190 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2191 if (cpl == 0) {
2192 eflags_mask |= IOPL_MASK;
2194 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2195 if (cpl <= iopl) {
2196 eflags_mask |= IF_MASK;
2198 if (shift == 0) {
2199 eflags_mask &= 0xffff;
2201 cpu_load_eflags(env, new_eflags, eflags_mask);
2203 return;
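/* IRET back to vm86 mode: the 32-bit frame additionally holds ESP, SS,
   ES, DS, FS and GS.  All selectors are reloaded as real-mode style
   segments (base = selector << 4), EIP is truncated to 16 bits, and the
   vm86 code then runs at privilege level 3. */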
2205 return_to_vm86:
2206 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2207 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2208 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2209 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2210 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2211 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2213 /* modify processor state */
2214 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2215 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2216 VIP_MASK);
2217 load_seg_vm(env, R_CS, new_cs & 0xffff);
2218 load_seg_vm(env, R_SS, new_ss & 0xffff);
2219 load_seg_vm(env, R_ES, new_es & 0xffff);
2220 load_seg_vm(env, R_DS, new_ds & 0xffff);
2221 load_seg_vm(env, R_FS, new_fs & 0xffff);
2222 load_seg_vm(env, R_GS, new_gs & 0xffff);
2224 env->eip = new_eip & 0xffff;
2225 env->regs[R_ESP] = new_esp;
2226 }
2228 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2229 {
2230 int tss_selector, type;
2231 uint32_t e1, e2;
2233 /* specific case for TSS */
2234 if (env->eflags & NT_MASK) {
2235 #ifdef TARGET_X86_64
2236 if (env->hflags & HF_LMA_MASK) {
2237 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2239 #endif
2240 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2241 if (tss_selector & 4) {
2242 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2244 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2245 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2247 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2248 /* NOTE: we check both segment and busy TSS */
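/* The 0x17 mask keeps the S bit and the low three type bits: it drops
   bit 3, which only distinguishes 286 from 386 TSS descriptors, so a
   busy 286 TSS (type 3) and a busy 386 TSS (type 11) both compare equal
   to 3, while ordinary segments (S set) and non-busy system types are
   rejected. */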
2249 if (type != 3) {
2250 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2252 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2253 } else {
2254 helper_ret_protected(env, shift, 1, 0, GETPC());
2256 env->hflags2 &= ~HF2_NMI_MASK;
2257 }
2259 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2260 {
2261 helper_ret_protected(env, shift, 0, addend, GETPC());
2262 }
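/* SYSENTER enters CPL 0 with flat segments derived from the SYSENTER_CS
   MSR: CS selector = sysenter_cs, SS selector = sysenter_cs + 8, with
   EIP/ESP taken from the SYSENTER_EIP and SYSENTER_ESP MSRs; VM, IF and
   RF are cleared.  In long mode the code segment is additionally loaded
   with the L bit set so the target runs in 64-bit mode. */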
2264 void helper_sysenter(CPUX86State *env)
2265 {
2266 if (env->sysenter_cs == 0) {
2267 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2269 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2271 #ifdef TARGET_X86_64
2272 if (env->hflags & HF_LMA_MASK) {
2273 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2274 0, 0xffffffff,
2275 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2276 DESC_S_MASK |
2277 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2278 DESC_L_MASK);
2279 } else
2280 #endif
2282 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2283 0, 0xffffffff,
2284 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2285 DESC_S_MASK |
2286 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2288 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2289 0, 0xffffffff,
2290 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2291 DESC_S_MASK |
2292 DESC_W_MASK | DESC_A_MASK);
2293 env->regs[R_ESP] = env->sysenter_esp;
2294 env->eip = env->sysenter_eip;
2295 }
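/* SYSEXIT returns to CPL 3: the selectors are again derived from
   SYSENTER_CS (+16/+24 for a 32-bit return, +32/+40 when dflag == 2 for
   a 64-bit return) with RPL forced to 3, and the return EIP/ESP come
   from EDX and ECX. */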
2297 void helper_sysexit(CPUX86State *env, int dflag)
2298 {
2299 int cpl;
2301 cpl = env->hflags & HF_CPL_MASK;
2302 if (env->sysenter_cs == 0 || cpl != 0) {
2303 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2305 #ifdef TARGET_X86_64
2306 if (dflag == 2) {
2307 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2308 3, 0, 0xffffffff,
2309 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2310 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2311 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2312 DESC_L_MASK);
2313 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2314 3, 0, 0xffffffff,
2315 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2316 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2317 DESC_W_MASK | DESC_A_MASK);
2318 } else
2319 #endif
2321 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2322 3, 0, 0xffffffff,
2323 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2324 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2325 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2326 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2327 3, 0, 0xffffffff,
2328 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2329 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2330 DESC_W_MASK | DESC_A_MASK);
2332 env->regs[R_ESP] = env->regs[R_ECX];
2333 env->eip = env->regs[R_EDX];
2334 }
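/* LSL and LAR never fault on a selector that fails the checks: they just
   clear ZF (via CC_SRC) and return 0.  On success they set ZF and return
   the segment limit (LSL) or the access-rights bytes (LAR). */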
2336 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2337 {
2338 unsigned int limit;
2339 uint32_t e1, e2, eflags, selector;
2340 int rpl, dpl, cpl, type;
2342 selector = selector1 & 0xffff;
2343 eflags = cpu_cc_compute_all(env, CC_OP);
2344 if ((selector & 0xfffc) == 0) {
2345 goto fail;
2347 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2348 goto fail;
2350 rpl = selector & 3;
2351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2352 cpl = env->hflags & HF_CPL_MASK;
2353 if (e2 & DESC_S_MASK) {
2354 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2355 /* conforming */
2356 } else {
2357 if (dpl < cpl || dpl < rpl) {
2358 goto fail;
2361 } else {
2362 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2363 switch (type) {
2364 case 1:
2365 case 2:
2366 case 3:
2367 case 9:
2368 case 11:
2369 break;
2370 default:
2371 goto fail;
2373 if (dpl < cpl || dpl < rpl) {
2374 fail:
2375 CC_SRC = eflags & ~CC_Z;
2376 return 0;
2379 limit = get_seg_limit(e1, e2);
2380 CC_SRC = eflags | CC_Z;
2381 return limit;
2382 }
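/* LAR accepts a wider set of system descriptor types than LSL: call
   gates (4, 12) and task gates (5) have no limit, so LSL rejects them,
   but their access rights are still visible to LAR. */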
2384 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2385 {
2386 uint32_t e1, e2, eflags, selector;
2387 int rpl, dpl, cpl, type;
2389 selector = selector1 & 0xffff;
2390 eflags = cpu_cc_compute_all(env, CC_OP);
2391 if ((selector & 0xfffc) == 0) {
2392 goto fail;
2394 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2395 goto fail;
2397 rpl = selector & 3;
2398 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2399 cpl = env->hflags & HF_CPL_MASK;
2400 if (e2 & DESC_S_MASK) {
2401 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2402 /* conforming */
2403 } else {
2404 if (dpl < cpl || dpl < rpl) {
2405 goto fail;
2408 } else {
2409 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2410 switch (type) {
2411 case 1:
2412 case 2:
2413 case 3:
2414 case 4:
2415 case 5:
2416 case 9:
2417 case 11:
2418 case 12:
2419 break;
2420 default:
2421 goto fail;
2423 if (dpl < cpl || dpl < rpl) {
2424 fail:
2425 CC_SRC = eflags & ~CC_Z;
2426 return 0;
2429 CC_SRC = eflags | CC_Z;
2430 return e2 & 0x00f0ff00;
2431 }
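/* VERR/VERW likewise only report through ZF: they check whether the
   selected segment would be readable (VERR) or writable (VERW) at the
   current CPL and RPL, without raising an exception for selectors that
   fail the checks. */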
2433 void helper_verr(CPUX86State *env, target_ulong selector1)
2434 {
2435 uint32_t e1, e2, eflags, selector;
2436 int rpl, dpl, cpl;
2438 selector = selector1 & 0xffff;
2439 eflags = cpu_cc_compute_all(env, CC_OP);
2440 if ((selector & 0xfffc) == 0) {
2441 goto fail;
2443 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2444 goto fail;
2446 if (!(e2 & DESC_S_MASK)) {
2447 goto fail;
2449 rpl = selector & 3;
2450 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2451 cpl = env->hflags & HF_CPL_MASK;
2452 if (e2 & DESC_CS_MASK) {
2453 if (!(e2 & DESC_R_MASK)) {
2454 goto fail;
2456 if (!(e2 & DESC_C_MASK)) {
2457 if (dpl < cpl || dpl < rpl) {
2458 goto fail;
2461 } else {
2462 if (dpl < cpl || dpl < rpl) {
2463 fail:
2464 CC_SRC = eflags & ~CC_Z;
2465 return;
2468 CC_SRC = eflags | CC_Z;
2469 }
2471 void helper_verw(CPUX86State *env, target_ulong selector1)
2472 {
2473 uint32_t e1, e2, eflags, selector;
2474 int rpl, dpl, cpl;
2476 selector = selector1 & 0xffff;
2477 eflags = cpu_cc_compute_all(env, CC_OP);
2478 if ((selector & 0xfffc) == 0) {
2479 goto fail;
2481 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2482 goto fail;
2484 if (!(e2 & DESC_S_MASK)) {
2485 goto fail;
2487 rpl = selector & 3;
2488 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2489 cpl = env->hflags & HF_CPL_MASK;
2490 if (e2 & DESC_CS_MASK) {
2491 goto fail;
2492 } else {
2493 if (dpl < cpl || dpl < rpl) {
2494 goto fail;
2496 if (!(e2 & DESC_W_MASK)) {
2497 fail:
2498 CC_SRC = eflags & ~CC_Z;
2499 return;
2502 CC_SRC = eflags | CC_Z;
2503 }
2505 #if defined(CONFIG_USER_ONLY)
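/* User-mode emulation only: in real or vm86 mode a selector load just
   sets base = selector << 4 with a writable 64 KiB limit; otherwise the
   full protected-mode checks in helper_load_seg() apply. */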
2506 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2507 {
2508 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2509 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2510 selector &= 0xffff;
2511 cpu_x86_load_seg_cache(env, seg_reg, selector,
2512 (selector << 4), 0xffff,
2513 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2514 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2515 } else {
2516 helper_load_seg(env, seg_reg, selector);
2517 }
2518 }
2519 #endif
2521 /* check if Port I/O is allowed in TSS */
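/* The I/O permission check walks the TSS I/O bitmap: the 16-bit bitmap
   offset is read from TSS offset 0x66, the word containing bit 'addr' is
   fetched, and the 'size' bits starting at (addr & 7) must all be zero.
   Two bytes are always read because an unaligned port range can straddle
   a byte boundary, e.g. a dword access to port 0x3fe needs bits 6-7 of
   one byte and bits 0-1 of the next. */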
2522 static inline void check_io(CPUX86State *env, int addr, int size,
2523 uintptr_t retaddr)
2524 {
2525 int io_offset, val, mask;
2527 /* TSS must be a valid 32 bit one */
2528 if (!(env->tr.flags & DESC_P_MASK) ||
2529 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2530 env->tr.limit < 103) {
2531 goto fail;
2533 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2534 io_offset += (addr >> 3);
2535 /* Note: the check needs two bytes */
2536 if ((io_offset + 1) > env->tr.limit) {
2537 goto fail;
2539 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2540 val >>= (addr & 7);
2541 mask = (1 << size) - 1;
2542 /* all bits must be zero to allow the I/O */
2543 if ((val & mask) != 0) {
2544 fail:
2545 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2546 }
2547 }
2549 void helper_check_iob(CPUX86State *env, uint32_t t0)
2550 {
2551 check_io(env, t0, 1, GETPC());
2552 }
2554 void helper_check_iow(CPUX86State *env, uint32_t t0)
2555 {
2556 check_io(env, t0, 2, GETPC());
2557 }
2559 void helper_check_iol(CPUX86State *env, uint32_t t0)
2560 {
2561 check_io(env, t0, 4, GETPC());
2562 }