qemu/ar7.git: target/i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
29 //#define DEBUG_PCALL
31 #ifdef DEBUG_PCALL
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu) \
34 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 #else
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
38 #endif
40 #ifdef CONFIG_USER_ONLY
41 #define MEMSUFFIX _kernel
42 #define DATA_SIZE 1
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 2
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 4
49 #include "exec/cpu_ldst_useronly_template.h"
51 #define DATA_SIZE 8
52 #include "exec/cpu_ldst_useronly_template.h"
53 #undef MEMSUFFIX
54 #else
55 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
56 #define MEMSUFFIX _kernel
57 #define DATA_SIZE 1
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 2
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 4
64 #include "exec/cpu_ldst_template.h"
66 #define DATA_SIZE 8
67 #include "exec/cpu_ldst_template.h"
68 #undef CPU_MMU_INDEX
69 #undef MEMSUFFIX
70 #endif
72 /* return non-zero on error */
73 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
74 uint32_t *e2_ptr, int selector,
75 uintptr_t retaddr)
77 SegmentCache *dt;
78 int index;
79 target_ulong ptr;
81 if (selector & 0x4) {
82 dt = &env->ldt;
83 } else {
84 dt = &env->gdt;
86 index = selector & ~7;
87 if ((index + 7) > dt->limit) {
88 return -1;
90 ptr = dt->base + index;
91 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
92 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
93 return 0;
96 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
97 uint32_t *e2_ptr, int selector)
99 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
102 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
104 unsigned int limit;
106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
107 if (e2 & DESC_G_MASK) {
108 limit = (limit << 12) | 0xfff;
110 return limit;
113 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
115 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
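/* A legacy descriptor is 8 bytes; e1 is the low dword, e2 the high dword:
 *   base  = e1[31:16] | e2[7:0] << 16 | e2[31:24] << 24
 *   limit = e1[15:0] | e2[19:16] << 16, scaled to 4K granularity when
 *   DESC_G_MASK is set.
 * Worked example (illustrative only; the helper below exists nowhere else
 * and is kept under #if 0 like the other dormant snippets in this file):
 */
#if 0
static void decode_descriptor_example(void)
{
    /* flat, present, DPL 0, 32-bit code segment */
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9b00;

    /* get_seg_base(e1, e2)  == 0x00000000 */
    /* get_seg_limit(e1, e2) == 0xffffffff (raw limit 0xfffff, G = 1) */
}
#endif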
118 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
119 uint32_t e2)
121 sc->base = get_seg_base(e1, e2);
122 sc->limit = get_seg_limit(e1, e2);
123 sc->flags = e2;
126 /* init the segment cache in vm86 mode. */
127 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
129 selector &= 0xffff;
131 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
132 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
133 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
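/* In vm86 mode a selector is just a real-mode paragraph number, so the
   cache built above uses base = selector << 4, a 64 KiB limit, and
   present/accessed/writable data flags at DPL 3. */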
136 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
137 uint32_t *esp_ptr, int dpl,
138 uintptr_t retaddr)
140 X86CPU *cpu = x86_env_get_cpu(env);
141 int type, index, shift;
143 #if 0
145 int i;
146 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
147 for (i = 0; i < env->tr.limit; i++) {
148 printf("%02x ", env->tr.base[i]);
149 if ((i & 7) == 7) {
150 printf("\n");
153 printf("\n");
155 #endif
157 if (!(env->tr.flags & DESC_P_MASK)) {
158 cpu_abort(CPU(cpu), "invalid tss");
160 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
161 if ((type & 7) != 1) {
162 cpu_abort(CPU(cpu), "invalid tss type");
164 shift = type >> 3;
165 index = (dpl * 4 + 2) << shift;
166 if (index + (4 << shift) - 1 > env->tr.limit) {
167 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
169 if (shift == 0) {
170 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
172 } else {
173 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
174 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
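/* The privilege-level stack pointers sit at fixed TSS offsets: in a 32-bit
   TSS, ESP0/SS0 are at 0x04/0x08, ESP1/SS1 at 0x0c/0x10 and ESP2/SS2 at
   0x14/0x18, hence index = (dpl * 4 + 2) << 1.  A 16-bit TSS packs SP0/SS0
   at 0x02/0x04, SP1/SS1 at 0x06/0x08 and SP2/SS2 at 0x0a/0x0c. */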
178 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
179 uintptr_t retaddr)
181 uint32_t e1, e2;
182 int rpl, dpl;
184 if ((selector & 0xfffc) != 0) {
185 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 if (!(e2 & DESC_S_MASK)) {
189 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191 rpl = selector & 3;
192 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
193 if (seg_reg == R_CS) {
194 if (!(e2 & DESC_CS_MASK)) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 if (dpl != rpl) {
198 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
200 } else if (seg_reg == R_SS) {
201 /* SS must be writable data */
202 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 if (dpl != cpl || dpl != rpl) {
206 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
208 } else {
209 /* not readable code */
210 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
211 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
213 /* if data or non-conforming code, check the rights */
214 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
215 if (dpl < cpl || dpl < rpl) {
216 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
220 if (!(e2 & DESC_P_MASK)) {
221 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
223 cpu_x86_load_seg_cache(env, seg_reg, selector,
224 get_seg_base(e1, e2),
225 get_seg_limit(e1, e2),
226 e2);
227 } else {
228 if (seg_reg == R_SS || seg_reg == R_CS) {
229 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
234 #define SWITCH_TSS_JMP 0
235 #define SWITCH_TSS_IRET 1
236 #define SWITCH_TSS_CALL 2
238 /* XXX: restore CPU state in registers (PowerPC case) */
239 static void switch_tss_ra(CPUX86State *env, int tss_selector,
240 uint32_t e1, uint32_t e2, int source,
241 uint32_t next_eip, uintptr_t retaddr)
243 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
244 target_ulong tss_base;
245 uint32_t new_regs[8], new_segs[6];
246 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
247 uint32_t old_eflags, eflags_mask;
248 SegmentCache *dt;
249 int index;
250 target_ulong ptr;
252 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
253 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
254 source);
256 /* if task gate, we read the TSS segment and we load it */
257 if (type == 5) {
258 if (!(e2 & DESC_P_MASK)) {
259 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
261 tss_selector = e1 >> 16;
262 if (tss_selector & 4) {
263 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
265 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 if (e2 & DESC_S_MASK) {
269 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
271 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
272 if ((type & 7) != 1) {
273 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
277 if (!(e2 & DESC_P_MASK)) {
278 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
281 if (type & 8) {
282 tss_limit_max = 103;
283 } else {
284 tss_limit_max = 43;
286 tss_limit = get_seg_limit(e1, e2);
287 tss_base = get_seg_base(e1, e2);
288 if ((tss_selector & 4) != 0 ||
289 tss_limit < tss_limit_max) {
290 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
292 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
293 if (old_type & 8) {
294 old_tss_limit_max = 103;
295 } else {
296 old_tss_limit_max = 43;
299 /* read all the registers from the new TSS */
300 if (type & 8) {
301 /* 32 bit */
302 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
303 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
304 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
305 for (i = 0; i < 8; i++) {
306 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
307 retaddr);
309 for (i = 0; i < 6; i++) {
310 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
311 retaddr);
313 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
314 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
315 } else {
316 /* 16 bit */
317 new_cr3 = 0;
318 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
319 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
320 for (i = 0; i < 8; i++) {
321 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
322 retaddr) | 0xffff0000;
324 for (i = 0; i < 4; i++) {
325 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
326 retaddr);
328 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
329 new_segs[R_FS] = 0;
330 new_segs[R_GS] = 0;
331 new_trap = 0;
333 /* XXX: avoid a compiler warning, see
334 http://support.amd.com/us/Processor_TechDocs/24593.pdf
335 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
336 (void)new_trap;
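/* For reference, the 32-bit TSS fields read above sit at fixed offsets:
   CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, EAX..EDI at 0x28..0x44,
   ES/CS/SS/DS/FS/GS at 0x48..0x5c, the LDT selector at 0x60 and the
   T (debug trap) bit at 0x64.  The 16-bit TSS keeps IP at 0x0e, FLAGS at
   0x10, the general registers from 0x12 and the LDT selector at 0x2a. */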
338 /* NOTE: we must avoid memory exceptions during the task switch,
339 so we make dummy accesses beforehand */
340 /* XXX: it can still fail in some cases, so a bigger hack is
341 necessary to validate the TLB after having done the accesses */
343 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
344 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
345 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
346 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
348 /* clear busy bit (it is restartable) */
349 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
350 target_ulong ptr;
351 uint32_t e2;
353 ptr = env->gdt.base + (env->tr.selector & ~7);
354 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
355 e2 &= ~DESC_TSS_BUSY_MASK;
356 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
358 old_eflags = cpu_compute_eflags(env);
359 if (source == SWITCH_TSS_IRET) {
360 old_eflags &= ~NT_MASK;
363 /* save the current state in the old TSS */
364 if (type & 8) {
365 /* 32 bit */
366 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
374 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
375 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
376 for (i = 0; i < 6; i++) {
377 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
378 env->segs[i].selector, retaddr);
380 } else {
381 /* 16 bit */
382 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
390 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
391 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
392 for (i = 0; i < 4; i++) {
393 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
394 env->segs[i].selector, retaddr);
398 /* now if an exception occurs, it will occur in the next task
399 context */
401 if (source == SWITCH_TSS_CALL) {
402 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
403 new_eflags |= NT_MASK;
406 /* set busy bit */
407 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
408 target_ulong ptr;
409 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
413 e2 |= DESC_TSS_BUSY_MASK;
414 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
430 /* load all registers without raising an exception, then reload them
431 with possible exceptions */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8)) {
436 eflags_mask &= 0xffff;
438 cpu_load_eflags(env, new_eflags, eflags_mask);
439 /* XXX: what to do in 16 bit case? */
440 env->regs[R_EAX] = new_regs[0];
441 env->regs[R_ECX] = new_regs[1];
442 env->regs[R_EDX] = new_regs[2];
443 env->regs[R_EBX] = new_regs[3];
444 env->regs[R_ESP] = new_regs[4];
445 env->regs[R_EBP] = new_regs[5];
446 env->regs[R_ESI] = new_regs[6];
447 env->regs[R_EDI] = new_regs[7];
448 if (new_eflags & VM_MASK) {
449 for (i = 0; i < 6; i++) {
450 load_seg_vm(env, i, new_segs[i]);
452 } else {
453 /* first load just the selectors, as the rest may trigger exceptions */
454 for (i = 0; i < 6; i++) {
455 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
459 env->ldt.selector = new_ldt & ~4;
460 env->ldt.base = 0;
461 env->ldt.limit = 0;
462 env->ldt.flags = 0;
464 /* load the LDT */
465 if (new_ldt & 4) {
466 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit) {
473 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475 ptr = dt->base + index;
476 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
477 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
478 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 if (!(e2 & DESC_P_MASK)) {
482 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
484 load_seg_cache_raw_dt(&env->ldt, e1, e2);
487 /* load the segments */
488 if (!(new_eflags & VM_MASK)) {
489 int cpl = new_segs[R_CS] & 3;
490 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
491 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
492 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
493 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
494 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
495 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
498 /* check that env->eip is in the CS segment limits */
499 if (new_eip > env->segs[R_CS].limit) {
500 /* XXX: different exception if CALL? */
501 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
504 #ifndef CONFIG_USER_ONLY
505 /* reset local breakpoints */
506 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
507 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
509 #endif
512 static void switch_tss(CPUX86State *env, int tss_selector,
513 uint32_t e1, uint32_t e2, int source,
514 uint32_t next_eip)
516 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
519 static inline unsigned int get_sp_mask(unsigned int e2)
521 if (e2 & DESC_B_MASK) {
522 return 0xffffffff;
523 } else {
524 return 0xffff;
528 static int exception_has_error_code(int intno)
530 switch (intno) {
531 case 8:
532 case 10:
533 case 11:
534 case 12:
535 case 13:
536 case 14:
537 case 17:
538 return 1;
540 return 0;
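/* Only #DF (8), #TS (10), #NP (11), #SS (12), #GP (13), #PF (14) and
   #AC (17) push an error code; other exceptions and external interrupts
   do not. */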
543 #ifdef TARGET_X86_64
544 #define SET_ESP(val, sp_mask) \
545 do { \
546 if ((sp_mask) == 0xffff) { \
547 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
548 ((val) & 0xffff); \
549 } else if ((sp_mask) == 0xffffffffLL) { \
550 env->regs[R_ESP] = (uint32_t)(val); \
551 } else { \
552 env->regs[R_ESP] = (val); \
554 } while (0)
555 #else
556 #define SET_ESP(val, sp_mask) \
557 do { \
558 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
559 ((val) & (sp_mask)); \
560 } while (0)
561 #endif
563 /* in 64-bit machines, this can overflow. So this segment addition macro
564 * can be used to trim the value to 32-bit whenever needed */
565 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
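/* Example of the overflow mentioned above: with a 64-bit target_ulong,
   ssp = 0xfffff000 and (sp & sp_mask) = 0x2000 would yield 0x100001000,
   whereas a 32-bit linear address wraps to 0x00001000; the uint32_t cast
   reproduces that wrap-around. */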
567 /* XXX: add a is_user flag to have proper security support */
568 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
570 sp -= 2; \
571 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
574 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
576 sp -= 4; \
577 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
580 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
582 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
583 sp += 2; \
586 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
588 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
589 sp += 4; \
592 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
593 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
594 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
595 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
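/* These macros operate on a local copy of the stack pointer and address
   memory through the stack segment base (ssp); callers commit the new
   value with SET_ESP() only after every push/pop has succeeded, so a
   fault raised in the middle of a sequence leaves env->regs[R_ESP]
   unchanged. */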
597 /* protected mode interrupt */
598 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
599 int error_code, unsigned int next_eip,
600 int is_hw)
602 SegmentCache *dt;
603 target_ulong ptr, ssp;
604 int type, dpl, selector, ss_dpl, cpl;
605 int has_error_code, new_stack, shift;
606 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
607 uint32_t old_eip, sp_mask;
608 int vm86 = env->eflags & VM_MASK;
610 has_error_code = 0;
611 if (!is_int && !is_hw) {
612 has_error_code = exception_has_error_code(intno);
614 if (is_int) {
615 old_eip = next_eip;
616 } else {
617 old_eip = env->eip;
620 dt = &env->idt;
621 if (intno * 8 + 7 > dt->limit) {
622 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
624 ptr = dt->base + intno * 8;
625 e1 = cpu_ldl_kernel(env, ptr);
626 e2 = cpu_ldl_kernel(env, ptr + 4);
627 /* check gate type */
628 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
629 switch (type) {
630 case 5: /* task gate */
631 /* must do that check here to return the correct error code */
632 if (!(e2 & DESC_P_MASK)) {
633 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
635 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
636 if (has_error_code) {
637 int type;
638 uint32_t mask;
640 /* push the error code */
641 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
642 shift = type >> 3;
643 if (env->segs[R_SS].flags & DESC_B_MASK) {
644 mask = 0xffffffff;
645 } else {
646 mask = 0xffff;
648 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
649 ssp = env->segs[R_SS].base + esp;
650 if (shift) {
651 cpu_stl_kernel(env, ssp, error_code);
652 } else {
653 cpu_stw_kernel(env, ssp, error_code);
655 SET_ESP(esp, mask);
657 return;
658 case 6: /* 286 interrupt gate */
659 case 7: /* 286 trap gate */
660 case 14: /* 386 interrupt gate */
661 case 15: /* 386 trap gate */
662 break;
663 default:
664 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
665 break;
667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
668 cpl = env->hflags & HF_CPL_MASK;
669 /* check privilege if software int */
670 if (is_int && dpl < cpl) {
671 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
673 /* check valid bit */
674 if (!(e2 & DESC_P_MASK)) {
675 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
677 selector = e1 >> 16;
678 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
679 if ((selector & 0xfffc) == 0) {
680 raise_exception_err(env, EXCP0D_GPF, 0);
682 if (load_segment(env, &e1, &e2, selector) != 0) {
683 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
685 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
689 if (dpl > cpl) {
690 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
692 if (!(e2 & DESC_P_MASK)) {
693 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
695 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
696 /* to inner privilege */
697 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
698 if ((ss & 0xfffc) == 0) {
699 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
701 if ((ss & 3) != dpl) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
708 if (ss_dpl != dpl) {
709 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
711 if (!(ss_e2 & DESC_S_MASK) ||
712 (ss_e2 & DESC_CS_MASK) ||
713 !(ss_e2 & DESC_W_MASK)) {
714 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
716 if (!(ss_e2 & DESC_P_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 new_stack = 1;
720 sp_mask = get_sp_mask(ss_e2);
721 ssp = get_seg_base(ss_e1, ss_e2);
722 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
723 /* to same privilege */
724 if (vm86) {
725 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
727 new_stack = 0;
728 sp_mask = get_sp_mask(env->segs[R_SS].flags);
729 ssp = env->segs[R_SS].base;
730 esp = env->regs[R_ESP];
731 dpl = cpl;
732 } else {
733 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
734 new_stack = 0; /* avoid warning */
735 sp_mask = 0; /* avoid warning */
736 ssp = 0; /* avoid warning */
737 esp = 0; /* avoid warning */
740 shift = type >> 3;
742 #if 0
743 /* XXX: check that enough room is available */
744 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
745 if (vm86) {
746 push_size += 8;
748 push_size <<= shift;
749 #endif
750 if (shift == 1) {
751 if (new_stack) {
752 if (vm86) {
753 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
756 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
759 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
761 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
762 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
763 PUSHL(ssp, esp, sp_mask, old_eip);
764 if (has_error_code) {
765 PUSHL(ssp, esp, sp_mask, error_code);
767 } else {
768 if (new_stack) {
769 if (vm86) {
770 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
773 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
776 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
778 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
779 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
780 PUSHW(ssp, esp, sp_mask, old_eip);
781 if (has_error_code) {
782 PUSHW(ssp, esp, sp_mask, error_code);
786 /* interrupt gates clear the IF flag; trap gates do not */
787 if ((type & 1) == 0) {
788 env->eflags &= ~IF_MASK;
790 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
792 if (new_stack) {
793 if (vm86) {
794 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
797 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
799 ss = (ss & ~3) | dpl;
800 cpu_x86_load_seg_cache(env, R_SS, ss,
801 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
803 SET_ESP(esp, sp_mask);
805 selector = (selector & ~3) | dpl;
806 cpu_x86_load_seg_cache(env, R_CS, selector,
807 get_seg_base(e1, e2),
808 get_seg_limit(e1, e2),
809 e2);
810 env->eip = offset;
813 #ifdef TARGET_X86_64
815 #define PUSHQ_RA(sp, val, ra) \
817 sp -= 8; \
818 cpu_stq_kernel_ra(env, sp, (val), ra); \
821 #define POPQ_RA(sp, val, ra) \
823 val = cpu_ldq_kernel_ra(env, sp, ra); \
824 sp += 8; \
827 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
828 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
830 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
832 X86CPU *cpu = x86_env_get_cpu(env);
833 int index;
835 #if 0
836 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
837 env->tr.base, env->tr.limit);
838 #endif
840 if (!(env->tr.flags & DESC_P_MASK)) {
841 cpu_abort(CPU(cpu), "invalid tss");
843 index = 8 * level + 4;
844 if ((index + 7) > env->tr.limit) {
845 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
847 return cpu_ldq_kernel(env, env->tr.base + index);
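/* In the 64-bit TSS, RSP0/RSP1/RSP2 sit at offsets 0x04/0x0c/0x14 and
   IST1..IST7 at 0x24..0x54, so both are reachable with
   index = 8 * level + 4 (level = dpl for the RSPs, level = ist + 3 for
   the IST entries). */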
850 /* 64 bit interrupt */
851 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
852 int error_code, target_ulong next_eip, int is_hw)
854 SegmentCache *dt;
855 target_ulong ptr;
856 int type, dpl, selector, cpl, ist;
857 int has_error_code, new_stack;
858 uint32_t e1, e2, e3, ss;
859 target_ulong old_eip, esp, offset;
861 has_error_code = 0;
862 if (!is_int && !is_hw) {
863 has_error_code = exception_has_error_code(intno);
865 if (is_int) {
866 old_eip = next_eip;
867 } else {
868 old_eip = env->eip;
871 dt = &env->idt;
872 if (intno * 16 + 15 > dt->limit) {
873 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
875 ptr = dt->base + intno * 16;
876 e1 = cpu_ldl_kernel(env, ptr);
877 e2 = cpu_ldl_kernel(env, ptr + 4);
878 e3 = cpu_ldl_kernel(env, ptr + 8);
879 /* check gate type */
880 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
881 switch (type) {
882 case 14: /* 386 interrupt gate */
883 case 15: /* 386 trap gate */
884 break;
885 default:
886 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
887 break;
889 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
890 cpl = env->hflags & HF_CPL_MASK;
891 /* check privilege if software int */
892 if (is_int && dpl < cpl) {
893 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
895 /* check valid bit */
896 if (!(e2 & DESC_P_MASK)) {
897 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
899 selector = e1 >> 16;
900 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 ist = e2 & 7;
902 if ((selector & 0xfffc) == 0) {
903 raise_exception_err(env, EXCP0D_GPF, 0);
906 if (load_segment(env, &e1, &e2, selector) != 0) {
907 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
913 if (dpl > cpl) {
914 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
916 if (!(e2 & DESC_P_MASK)) {
917 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
919 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
920 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
922 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
923 /* to inner privilege */
924 new_stack = 1;
925 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
926 ss = 0;
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK) {
930 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
932 new_stack = 0;
933 esp = env->regs[R_ESP];
934 dpl = cpl;
935 } else {
936 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
937 new_stack = 0; /* avoid warning */
938 esp = 0; /* avoid warning */
940 esp &= ~0xfLL; /* align stack */
942 PUSHQ(esp, env->segs[R_SS].selector);
943 PUSHQ(esp, env->regs[R_ESP]);
944 PUSHQ(esp, cpu_compute_eflags(env));
945 PUSHQ(esp, env->segs[R_CS].selector);
946 PUSHQ(esp, old_eip);
947 if (has_error_code) {
948 PUSHQ(esp, error_code);
951 /* interrupt gates clear the IF flag; trap gates do not */
952 if ((type & 1) == 0) {
953 env->eflags &= ~IF_MASK;
955 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
957 if (new_stack) {
958 ss = 0 | dpl;
959 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
961 env->regs[R_ESP] = esp;
963 selector = (selector & ~3) | dpl;
964 cpu_x86_load_seg_cache(env, R_CS, selector,
965 get_seg_base(e1, e2),
966 get_seg_limit(e1, e2),
967 e2);
968 env->eip = offset;
970 #endif
972 #ifdef TARGET_X86_64
973 #if defined(CONFIG_USER_ONLY)
974 void QEMU_NORETURN helper_syscall(CPUX86State *env, int next_eip_addend)
976 CPUState *cs = CPU(x86_env_get_cpu(env));
978 cs->exception_index = EXCP_SYSCALL;
979 env->exception_next_eip = env->eip + next_eip_addend;
980 cpu_loop_exit(cs);
983 void QEMU_NORETURN helper_vsyscall(CPUX86State *env)
985 CPUState *cs = CPU(x86_env_get_cpu(env));
986 cs->exception_index = EXCP_VSYSCALL;
987 cpu_loop_exit(cs);
989 #else
990 void helper_syscall(CPUX86State *env, int next_eip_addend)
992 int selector;
994 if (!(env->efer & MSR_EFER_SCE)) {
995 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
997 selector = (env->star >> 32) & 0xffff;
998 if (env->hflags & HF_LMA_MASK) {
999 int code64;
1001 env->regs[R_ECX] = env->eip + next_eip_addend;
1002 env->regs[11] = cpu_compute_eflags(env);
1004 code64 = env->hflags & HF_CS64_MASK;
1006 env->eflags &= ~env->fmask;
1007 cpu_load_eflags(env, env->eflags, 0);
1008 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1009 0, 0xffffffff,
1010 DESC_G_MASK | DESC_P_MASK |
1011 DESC_S_MASK |
1012 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1013 DESC_L_MASK);
1014 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1015 0, 0xffffffff,
1016 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1017 DESC_S_MASK |
1018 DESC_W_MASK | DESC_A_MASK);
1019 if (code64) {
1020 env->eip = env->lstar;
1021 } else {
1022 env->eip = env->cstar;
1024 } else {
1025 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1027 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1028 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1029 0, 0xffffffff,
1030 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1031 DESC_S_MASK |
1032 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1033 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1034 0, 0xffffffff,
1035 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1036 DESC_S_MASK |
1037 DESC_W_MASK | DESC_A_MASK);
1038 env->eip = (uint32_t)env->star;
1041 #endif
1042 #endif
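/* Both SYSCALL above and SYSRET below derive their selectors from the
   IA32_STAR MSR: bits 47:32 give the SYSCALL CS (SS is that value + 8)
   and bits 63:48 the SYSRET base selector (SS = +8, 64-bit CS = +16).
   LSTAR and CSTAR hold the 64-bit and compatibility-mode entry points,
   and env->fmask (IA32_FMASK) lists the RFLAGS bits cleared on entry. */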
1044 #ifdef TARGET_X86_64
1045 void helper_sysret(CPUX86State *env, int dflag)
1047 int cpl, selector;
1049 if (!(env->efer & MSR_EFER_SCE)) {
1050 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1052 cpl = env->hflags & HF_CPL_MASK;
1053 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1054 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1056 selector = (env->star >> 48) & 0xffff;
1057 if (env->hflags & HF_LMA_MASK) {
1058 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1059 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1060 NT_MASK);
1061 if (dflag == 2) {
1062 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1063 0, 0xffffffff,
1064 DESC_G_MASK | DESC_P_MASK |
1065 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1066 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1067 DESC_L_MASK);
1068 env->eip = env->regs[R_ECX];
1069 } else {
1070 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1071 0, 0xffffffff,
1072 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1075 env->eip = (uint32_t)env->regs[R_ECX];
1077 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1078 0, 0xffffffff,
1079 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081 DESC_W_MASK | DESC_A_MASK);
1082 } else {
1083 env->eflags |= IF_MASK;
1084 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1085 0, 0xffffffff,
1086 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1087 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1089 env->eip = (uint32_t)env->regs[R_ECX];
1090 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1091 0, 0xffffffff,
1092 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1093 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1094 DESC_W_MASK | DESC_A_MASK);
1097 #endif
1099 /* real mode interrupt */
1100 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1101 int error_code, unsigned int next_eip)
1103 SegmentCache *dt;
1104 target_ulong ptr, ssp;
1105 int selector;
1106 uint32_t offset, esp;
1107 uint32_t old_cs, old_eip;
1109 /* real mode (simpler!) */
1110 dt = &env->idt;
1111 if (intno * 4 + 3 > dt->limit) {
1112 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1114 ptr = dt->base + intno * 4;
1115 offset = cpu_lduw_kernel(env, ptr);
1116 selector = cpu_lduw_kernel(env, ptr + 2);
1117 esp = env->regs[R_ESP];
1118 ssp = env->segs[R_SS].base;
1119 if (is_int) {
1120 old_eip = next_eip;
1121 } else {
1122 old_eip = env->eip;
1124 old_cs = env->segs[R_CS].selector;
1125 /* XXX: use SS segment size? */
1126 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1127 PUSHW(ssp, esp, 0xffff, old_cs);
1128 PUSHW(ssp, esp, 0xffff, old_eip);
1130 /* update processor state */
1131 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1132 env->eip = offset;
1133 env->segs[R_CS].selector = selector;
1134 env->segs[R_CS].base = (selector << 4);
1135 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
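/* Real-mode vectors are 4 bytes each in the IVT at linear address
   intno * 4: a 16-bit offset followed by a 16-bit segment.  Only FLAGS,
   CS and IP are pushed (on the 16-bit stack), and IF/TF/AC/RF are
   cleared before jumping to the handler. */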
1138 #if defined(CONFIG_USER_ONLY)
1139 /* fake user mode interrupt. is_int is TRUE if coming from the int
1140 * instruction. next_eip is the env->eip value AFTER the interrupt
1141 * instruction. It is only relevant if is_int is TRUE or if intno
1142 * is EXCP_SYSCALL.
1144 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1145 int error_code, target_ulong next_eip)
1147 if (is_int) {
1148 SegmentCache *dt;
1149 target_ulong ptr;
1150 int dpl, cpl, shift;
1151 uint32_t e2;
1153 dt = &env->idt;
1154 if (env->hflags & HF_LMA_MASK) {
1155 shift = 4;
1156 } else {
1157 shift = 3;
1159 ptr = dt->base + (intno << shift);
1160 e2 = cpu_ldl_kernel(env, ptr + 4);
1162 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1163 cpl = env->hflags & HF_CPL_MASK;
1164 /* check privilege if software int */
1165 if (dpl < cpl) {
1166 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1170 /* Since we emulate only user space, we cannot do more than exit the
1171 emulation with the suitable exception and error code. So update EIP
1172 for INT 0x80 and EXCP_SYSCALL. */
1173 if (is_int || intno == EXCP_SYSCALL) {
1174 env->eip = next_eip;
1178 #else
1180 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1181 int error_code, int is_hw, int rm)
1183 CPUState *cs = CPU(x86_env_get_cpu(env));
1184 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1185 control.event_inj));
1187 if (!(event_inj & SVM_EVTINJ_VALID)) {
1188 int type;
1190 if (is_int) {
1191 type = SVM_EVTINJ_TYPE_SOFT;
1192 } else {
1193 type = SVM_EVTINJ_TYPE_EXEPT;
1195 event_inj = intno | type | SVM_EVTINJ_VALID;
1196 if (!rm && exception_has_error_code(intno)) {
1197 event_inj |= SVM_EVTINJ_VALID_ERR;
1198 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1199 control.event_inj_err),
1200 error_code);
1202 x86_stl_phys(cs,
1203 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1204 event_inj);
1207 #endif
1210 * Begin execution of an interruption. is_int is TRUE if coming from
1211 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1212 * instruction. It is only relevant if is_int is TRUE.
1214 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1215 int error_code, target_ulong next_eip, int is_hw)
1217 CPUX86State *env = &cpu->env;
1219 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1220 if ((env->cr[0] & CR0_PE_MASK)) {
1221 static int count;
1223 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1224 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1225 count, intno, error_code, is_int,
1226 env->hflags & HF_CPL_MASK,
1227 env->segs[R_CS].selector, env->eip,
1228 (int)env->segs[R_CS].base + env->eip,
1229 env->segs[R_SS].selector, env->regs[R_ESP]);
1230 if (intno == 0x0e) {
1231 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1232 } else {
1233 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1235 qemu_log("\n");
1236 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1237 #if 0
1239 int i;
1240 target_ulong ptr;
1242 qemu_log(" code=");
1243 ptr = env->segs[R_CS].base + env->eip;
1244 for (i = 0; i < 16; i++) {
1245 qemu_log(" %02x", ldub(ptr + i));
1247 qemu_log("\n");
1249 #endif
1250 count++;
1253 if (env->cr[0] & CR0_PE_MASK) {
1254 #if !defined(CONFIG_USER_ONLY)
1255 if (env->hflags & HF_SVMI_MASK) {
1256 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1258 #endif
1259 #ifdef TARGET_X86_64
1260 if (env->hflags & HF_LMA_MASK) {
1261 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1262 } else
1263 #endif
1265 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1266 is_hw);
1268 } else {
1269 #if !defined(CONFIG_USER_ONLY)
1270 if (env->hflags & HF_SVMI_MASK) {
1271 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1273 #endif
1274 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1277 #if !defined(CONFIG_USER_ONLY)
1278 if (env->hflags & HF_SVMI_MASK) {
1279 CPUState *cs = CPU(cpu);
1280 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1281 offsetof(struct vmcb,
1282 control.event_inj));
1284 x86_stl_phys(cs,
1285 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1286 event_inj & ~SVM_EVTINJ_VALID);
1288 #endif
1291 void x86_cpu_do_interrupt(CPUState *cs)
1293 X86CPU *cpu = X86_CPU(cs);
1294 CPUX86State *env = &cpu->env;
1296 #if defined(CONFIG_USER_ONLY)
1297 /* if user mode only, we simulate a fake exception
1298 which will be handled outside the cpu execution
1299 loop */
1300 do_interrupt_user(env, cs->exception_index,
1301 env->exception_is_int,
1302 env->error_code,
1303 env->exception_next_eip);
1304 /* successfully delivered */
1305 env->old_exception = -1;
1306 #else
1307 /* simulate a real cpu exception. On i386, it can
1308 trigger new exceptions, but we do not handle
1309 double or triple faults yet. */
1310 do_interrupt_all(cpu, cs->exception_index,
1311 env->exception_is_int,
1312 env->error_code,
1313 env->exception_next_eip, 0);
1314 /* successfully delivered */
1315 env->old_exception = -1;
1316 #endif
1319 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1321 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
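/* x86_cpu_exec_interrupt() below services at most one of the pending
   events per call, checked in this order: SIPI, SMI, NMI, machine check,
   maskable external interrupts, then SVM virtual interrupts. */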
1324 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1326 X86CPU *cpu = X86_CPU(cs);
1327 CPUX86State *env = &cpu->env;
1328 bool ret = false;
1330 #if !defined(CONFIG_USER_ONLY)
1331 if (interrupt_request & CPU_INTERRUPT_POLL) {
1332 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1333 apic_poll_irq(cpu->apic_state);
1334 /* Don't process multiple interrupt requests in a single call.
1335 This is required to make icount-driven execution deterministic. */
1336 return true;
1338 #endif
1339 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1340 do_cpu_sipi(cpu);
1341 } else if (env->hflags2 & HF2_GIF_MASK) {
1342 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1343 !(env->hflags & HF_SMM_MASK)) {
1344 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1345 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1346 do_smm_enter(cpu);
1347 ret = true;
1348 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1349 !(env->hflags2 & HF2_NMI_MASK)) {
1350 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1351 env->hflags2 |= HF2_NMI_MASK;
1352 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1353 ret = true;
1354 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1355 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1356 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1357 ret = true;
1358 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1359 (((env->hflags2 & HF2_VINTR_MASK) &&
1360 (env->hflags2 & HF2_HIF_MASK)) ||
1361 (!(env->hflags2 & HF2_VINTR_MASK) &&
1362 (env->eflags & IF_MASK &&
1363 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1364 int intno;
1365 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1366 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1367 CPU_INTERRUPT_VIRQ);
1368 intno = cpu_get_pic_interrupt(env);
1369 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1370 "Servicing hardware INT=0x%02x\n", intno);
1371 do_interrupt_x86_hardirq(env, intno, 1);
1372 /* ensure that no TB jump will be modified as
1373 the program flow was changed */
1374 ret = true;
1375 #if !defined(CONFIG_USER_ONLY)
1376 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1377 (env->eflags & IF_MASK) &&
1378 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1379 int intno;
1380 /* FIXME: this should respect TPR */
1381 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1382 intno = x86_ldl_phys(cs, env->vm_vmcb
1383 + offsetof(struct vmcb, control.int_vector));
1384 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1385 "Servicing virtual hardware INT=0x%02x\n", intno);
1386 do_interrupt_x86_hardirq(env, intno, 1);
1387 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1388 ret = true;
1389 #endif
1393 return ret;
1396 void helper_lldt(CPUX86State *env, int selector)
1398 SegmentCache *dt;
1399 uint32_t e1, e2;
1400 int index, entry_limit;
1401 target_ulong ptr;
1403 selector &= 0xffff;
1404 if ((selector & 0xfffc) == 0) {
1405 /* XXX: NULL selector case: invalid LDT */
1406 env->ldt.base = 0;
1407 env->ldt.limit = 0;
1408 } else {
1409 if (selector & 0x4) {
1410 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1412 dt = &env->gdt;
1413 index = selector & ~7;
1414 #ifdef TARGET_X86_64
1415 if (env->hflags & HF_LMA_MASK) {
1416 entry_limit = 15;
1417 } else
1418 #endif
1420 entry_limit = 7;
1422 if ((index + entry_limit) > dt->limit) {
1423 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1425 ptr = dt->base + index;
1426 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1427 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1428 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1429 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1431 if (!(e2 & DESC_P_MASK)) {
1432 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1434 #ifdef TARGET_X86_64
1435 if (env->hflags & HF_LMA_MASK) {
1436 uint32_t e3;
1438 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1439 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1440 env->ldt.base |= (target_ulong)e3 << 32;
1441 } else
1442 #endif
1444 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1447 env->ldt.selector = selector;
1450 void helper_ltr(CPUX86State *env, int selector)
1452 SegmentCache *dt;
1453 uint32_t e1, e2;
1454 int index, type, entry_limit;
1455 target_ulong ptr;
1457 selector &= 0xffff;
1458 if ((selector & 0xfffc) == 0) {
1459 /* NULL selector case: invalid TR */
1460 env->tr.base = 0;
1461 env->tr.limit = 0;
1462 env->tr.flags = 0;
1463 } else {
1464 if (selector & 0x4) {
1465 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1467 dt = &env->gdt;
1468 index = selector & ~7;
1469 #ifdef TARGET_X86_64
1470 if (env->hflags & HF_LMA_MASK) {
1471 entry_limit = 15;
1472 } else
1473 #endif
1475 entry_limit = 7;
1477 if ((index + entry_limit) > dt->limit) {
1478 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1480 ptr = dt->base + index;
1481 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1482 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1483 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1484 if ((e2 & DESC_S_MASK) ||
1485 (type != 1 && type != 9)) {
1486 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1488 if (!(e2 & DESC_P_MASK)) {
1489 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1491 #ifdef TARGET_X86_64
1492 if (env->hflags & HF_LMA_MASK) {
1493 uint32_t e3, e4;
1495 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1496 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1497 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1498 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1500 load_seg_cache_raw_dt(&env->tr, e1, e2);
1501 env->tr.base |= (target_ulong)e3 << 32;
1502 } else
1503 #endif
1505 load_seg_cache_raw_dt(&env->tr, e1, e2);
1507 e2 |= DESC_TSS_BUSY_MASK;
1508 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1510 env->tr.selector = selector;
1513 /* only works in protected mode, not VM86. seg_reg must be != R_CS */
1514 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1516 uint32_t e1, e2;
1517 int cpl, dpl, rpl;
1518 SegmentCache *dt;
1519 int index;
1520 target_ulong ptr;
1522 selector &= 0xffff;
1523 cpl = env->hflags & HF_CPL_MASK;
1524 if ((selector & 0xfffc) == 0) {
1525 /* null selector case */
1526 if (seg_reg == R_SS
1527 #ifdef TARGET_X86_64
1528 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1529 #endif
1531 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1533 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1534 } else {
1536 if (selector & 0x4) {
1537 dt = &env->ldt;
1538 } else {
1539 dt = &env->gdt;
1541 index = selector & ~7;
1542 if ((index + 7) > dt->limit) {
1543 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1545 ptr = dt->base + index;
1546 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1547 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1549 if (!(e2 & DESC_S_MASK)) {
1550 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1552 rpl = selector & 3;
1553 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1554 if (seg_reg == R_SS) {
1555 /* must be writable segment */
1556 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1557 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1559 if (rpl != cpl || dpl != cpl) {
1560 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1562 } else {
1563 /* must be readable segment */
1564 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1565 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1568 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1569 /* if not conforming code, test rights */
1570 if (dpl < cpl || dpl < rpl) {
1571 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1576 if (!(e2 & DESC_P_MASK)) {
1577 if (seg_reg == R_SS) {
1578 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1579 } else {
1580 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1584 /* set the access bit if not already set */
1585 if (!(e2 & DESC_A_MASK)) {
1586 e2 |= DESC_A_MASK;
1587 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1590 cpu_x86_load_seg_cache(env, seg_reg, selector,
1591 get_seg_base(e1, e2),
1592 get_seg_limit(e1, e2),
1593 e2);
1594 #if 0
1595 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1596 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1597 #endif
1601 /* protected mode jump */
1602 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1603 target_ulong next_eip)
1605 int gate_cs, type;
1606 uint32_t e1, e2, cpl, dpl, rpl, limit;
1608 if ((new_cs & 0xfffc) == 0) {
1609 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1611 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1612 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1614 cpl = env->hflags & HF_CPL_MASK;
1615 if (e2 & DESC_S_MASK) {
1616 if (!(e2 & DESC_CS_MASK)) {
1617 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1619 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1620 if (e2 & DESC_C_MASK) {
1621 /* conforming code segment */
1622 if (dpl > cpl) {
1623 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1625 } else {
1626 /* non conforming code segment */
1627 rpl = new_cs & 3;
1628 if (rpl > cpl) {
1629 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1631 if (dpl != cpl) {
1632 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1635 if (!(e2 & DESC_P_MASK)) {
1636 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1638 limit = get_seg_limit(e1, e2);
1639 if (new_eip > limit &&
1640 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1641 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1643 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1644 get_seg_base(e1, e2), limit, e2);
1645 env->eip = new_eip;
1646 } else {
1647 /* jump to call or task gate */
1648 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1649 rpl = new_cs & 3;
1650 cpl = env->hflags & HF_CPL_MASK;
1651 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1652 switch (type) {
1653 case 1: /* 286 TSS */
1654 case 9: /* 386 TSS */
1655 case 5: /* task gate */
1656 if (dpl < cpl || dpl < rpl) {
1657 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1659 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1660 break;
1661 case 4: /* 286 call gate */
1662 case 12: /* 386 call gate */
1663 if ((dpl < cpl) || (dpl < rpl)) {
1664 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1666 if (!(e2 & DESC_P_MASK)) {
1667 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1669 gate_cs = e1 >> 16;
1670 new_eip = (e1 & 0xffff);
1671 if (type == 12) {
1672 new_eip |= (e2 & 0xffff0000);
1674 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1675 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1677 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1678 /* must be code segment */
1679 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1680 (DESC_S_MASK | DESC_CS_MASK))) {
1681 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1683 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1684 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1685 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1687 if (!(e2 & DESC_P_MASK)) {
1688 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1690 limit = get_seg_limit(e1, e2);
1691 if (new_eip > limit) {
1692 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1694 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1695 get_seg_base(e1, e2), limit, e2);
1696 env->eip = new_eip;
1697 break;
1698 default:
1699 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1700 break;
1705 /* real mode call */
1706 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1707 int shift, int next_eip)
1709 int new_eip;
1710 uint32_t esp, esp_mask;
1711 target_ulong ssp;
1713 new_eip = new_eip1;
1714 esp = env->regs[R_ESP];
1715 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1716 ssp = env->segs[R_SS].base;
1717 if (shift) {
1718 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1719 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1720 } else {
1721 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1722 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1725 SET_ESP(esp, esp_mask);
1726 env->eip = new_eip;
1727 env->segs[R_CS].selector = new_cs;
1728 env->segs[R_CS].base = (new_cs << 4);
1731 /* protected mode call */
1732 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1733 int shift, target_ulong next_eip)
1735 int new_stack, i;
1736 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1737 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1738 uint32_t val, limit, old_sp_mask;
1739 target_ulong ssp, old_ssp;
1741 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1742 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1743 if ((new_cs & 0xfffc) == 0) {
1744 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1746 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1747 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1749 cpl = env->hflags & HF_CPL_MASK;
1750 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1751 if (e2 & DESC_S_MASK) {
1752 if (!(e2 & DESC_CS_MASK)) {
1753 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1755 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1756 if (e2 & DESC_C_MASK) {
1757 /* conforming code segment */
1758 if (dpl > cpl) {
1759 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1761 } else {
1762 /* non conforming code segment */
1763 rpl = new_cs & 3;
1764 if (rpl > cpl) {
1765 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1767 if (dpl != cpl) {
1768 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1771 if (!(e2 & DESC_P_MASK)) {
1772 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1775 #ifdef TARGET_X86_64
1776 /* XXX: check 16/32 bit cases in long mode */
1777 if (shift == 2) {
1778 target_ulong rsp;
1780 /* 64 bit case */
1781 rsp = env->regs[R_ESP];
1782 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1783 PUSHQ_RA(rsp, next_eip, GETPC());
1784 /* from this point, not restartable */
1785 env->regs[R_ESP] = rsp;
1786 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1787 get_seg_base(e1, e2),
1788 get_seg_limit(e1, e2), e2);
1789 env->eip = new_eip;
1790 } else
1791 #endif
1793 sp = env->regs[R_ESP];
1794 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1795 ssp = env->segs[R_SS].base;
1796 if (shift) {
1797 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1798 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1799 } else {
1800 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1801 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1804 limit = get_seg_limit(e1, e2);
1805 if (new_eip > limit) {
1806 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1808 /* from this point, not restartable */
1809 SET_ESP(sp, sp_mask);
1810 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1811 get_seg_base(e1, e2), limit, e2);
1812 env->eip = new_eip;
1814 } else {
1815 /* check gate type */
1816 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1817 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1818 rpl = new_cs & 3;
1819 switch (type) {
1820 case 1: /* available 286 TSS */
1821 case 9: /* available 386 TSS */
1822 case 5: /* task gate */
1823 if (dpl < cpl || dpl < rpl) {
1824 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1826 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1827 return;
1828 case 4: /* 286 call gate */
1829 case 12: /* 386 call gate */
1830 break;
1831 default:
1832 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1833 break;
1835 shift = type >> 3;
1837 if (dpl < cpl || dpl < rpl) {
1838 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1840 /* check valid bit */
1841 if (!(e2 & DESC_P_MASK)) {
1842 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1844 selector = e1 >> 16;
1845 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1846 param_count = e2 & 0x1f;
1847 if ((selector & 0xfffc) == 0) {
1848 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1851 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1852 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1854 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1855 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1857 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1858 if (dpl > cpl) {
1859 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1861 if (!(e2 & DESC_P_MASK)) {
1862 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1865 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1866 /* to inner privilege */
1867 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1868 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1869 TARGET_FMT_lx "\n", ss, sp, param_count,
1870 env->regs[R_ESP]);
1871 if ((ss & 0xfffc) == 0) {
1872 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1874 if ((ss & 3) != dpl) {
1875 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1877 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1878 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1880 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1881 if (ss_dpl != dpl) {
1882 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1884 if (!(ss_e2 & DESC_S_MASK) ||
1885 (ss_e2 & DESC_CS_MASK) ||
1886 !(ss_e2 & DESC_W_MASK)) {
1887 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1889 if (!(ss_e2 & DESC_P_MASK)) {
1890 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1893 /* push_size = ((param_count * 2) + 8) << shift; */
1895 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1896 old_ssp = env->segs[R_SS].base;
1898 sp_mask = get_sp_mask(ss_e2);
1899 ssp = get_seg_base(ss_e1, ss_e2);
1900 if (shift) {
1901 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1902 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1903 for (i = param_count - 1; i >= 0; i--) {
1904 val = cpu_ldl_kernel_ra(env, old_ssp +
1905 ((env->regs[R_ESP] + i * 4) &
1906 old_sp_mask), GETPC());
1907 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1909 } else {
1910 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1911 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1912 for (i = param_count - 1; i >= 0; i--) {
1913 val = cpu_lduw_kernel_ra(env, old_ssp +
1914 ((env->regs[R_ESP] + i * 2) &
1915 old_sp_mask), GETPC());
1916 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1919 new_stack = 1;
1920 } else {
1921 /* to same privilege */
1922 sp = env->regs[R_ESP];
1923 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1924 ssp = env->segs[R_SS].base;
1925 /* push_size = (4 << shift); */
1926 new_stack = 0;
1929 if (shift) {
1930 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1931 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1932 } else {
1933 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1934 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1937 /* from this point, not restartable */
1939 if (new_stack) {
1940 ss = (ss & ~3) | dpl;
1941 cpu_x86_load_seg_cache(env, R_SS, ss,
1942 ssp,
1943 get_seg_limit(ss_e1, ss_e2),
1944 ss_e2);
1947 selector = (selector & ~3) | dpl;
1948 cpu_x86_load_seg_cache(env, R_CS, selector,
1949 get_seg_base(e1, e2),
1950 get_seg_limit(e1, e2),
1951 e2);
1952 SET_ESP(sp, sp_mask);
1953 env->eip = offset;
1957 /* real and vm86 mode iret */
1958 void helper_iret_real(CPUX86State *env, int shift)
1960 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1961 target_ulong ssp;
1962 int eflags_mask;
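/* Pop the return (E)IP, CS and (E)FLAGS using a 16-bit stack mask,
   reload CS real-mode style (base = selector << 4), and restore only
   the EFLAGS bits that this form of IRET may change (IOPL is excluded
   under vm86). */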
1964 sp_mask = 0xffff; /* XXX: use SS segment size? */
1965 sp = env->regs[R_ESP];
1966 ssp = env->segs[R_SS].base;
1967 if (shift == 1) {
1968 /* 32 bits */
1969 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1970 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1971 new_cs &= 0xffff;
1972 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1973 } else {
1974 /* 16 bits */
1975 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1976 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1977 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1979 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1980 env->segs[R_CS].selector = new_cs;
1981 env->segs[R_CS].base = (new_cs << 4);
1982 env->eip = new_eip;
1983 if (env->eflags & VM_MASK) {
1984 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1985 NT_MASK;
1986 } else {
1987 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1988 RF_MASK | NT_MASK;
1990 if (shift == 0) {
1991 eflags_mask &= 0xffff;
1993 cpu_load_eflags(env, new_eflags, eflags_mask);
1994 env->hflags2 &= ~HF2_NMI_MASK;
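/* On a return to an outer privilege level, data segment registers
   whose DPL is below the new CPL must be nulled so they cannot be used
   to reach more-privileged data; conforming code segments are exempt. */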
1997 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1999 int dpl;
2000 uint32_t e2;
2002 /* XXX: on x86_64, we do not want to nullify FS and GS because
2003 they may still contain a valid base. I would be interested to
2004 know how a real x86_64 CPU behaves */
2005 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2006 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2007 return;
2010 e2 = env->segs[seg_reg].flags;
2011 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2012 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2013 /* data or non-conforming code segment */
2014 if (dpl < cpl) {
2015 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2020 /* protected mode iret */
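/* Shared implementation of protected-mode lret and iret: pop the
   return CS:(E)IP (plus EFLAGS when is_iret), validate the target code
   segment, and on a return to an outer privilege level also pop and
   check the new SS:(E)SP and null any data segments that become
   inaccessible; 'addend' extra stack bytes are discarded. */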
2021 static inline void helper_ret_protected(CPUX86State *env, int shift,
2022 int is_iret, int addend,
2023 uintptr_t retaddr)
2025 uint32_t new_cs, new_eflags, new_ss;
2026 uint32_t new_es, new_ds, new_fs, new_gs;
2027 uint32_t e1, e2, ss_e1, ss_e2;
2028 int cpl, dpl, rpl, eflags_mask, iopl;
2029 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2031 #ifdef TARGET_X86_64
2032 if (shift == 2) {
2033 sp_mask = -1;
2034 } else
2035 #endif
2037 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2039 sp = env->regs[R_ESP];
2040 ssp = env->segs[R_SS].base;
2041 new_eflags = 0; /* avoid warning */
2042 #ifdef TARGET_X86_64
2043 if (shift == 2) {
2044 POPQ_RA(sp, new_eip, retaddr);
2045 POPQ_RA(sp, new_cs, retaddr);
2046 new_cs &= 0xffff;
2047 if (is_iret) {
2048 POPQ_RA(sp, new_eflags, retaddr);
2050 } else
2051 #endif
2053 if (shift == 1) {
2054 /* 32 bits */
2055 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2056 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2057 new_cs &= 0xffff;
2058 if (is_iret) {
2059 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2060 if (new_eflags & VM_MASK) {
2061 goto return_to_vm86;
2064 } else {
2065 /* 16 bits */
2066 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2067 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2068 if (is_iret) {
2069 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2073 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2074 new_cs, new_eip, shift, addend);
2075 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2076 if ((new_cs & 0xfffc) == 0) {
2077 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2079 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2080 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2082 if (!(e2 & DESC_S_MASK) ||
2083 !(e2 & DESC_CS_MASK)) {
2084 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2086 cpl = env->hflags & HF_CPL_MASK;
2087 rpl = new_cs & 3;
2088 if (rpl < cpl) {
2089 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2091 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2092 if (e2 & DESC_C_MASK) {
2093 if (dpl > rpl) {
2094 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2096 } else {
2097 if (dpl != rpl) {
2098 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2101 if (!(e2 & DESC_P_MASK)) {
2102 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2105 sp += addend;
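/* In 64-bit mode IRET always takes the outer-return path below so that
   SS:RSP are popped even when the privilege level does not change. */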
2106 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2107 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2108 /* return to same privilege level */
2109 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2110 get_seg_base(e1, e2),
2111 get_seg_limit(e1, e2),
2112 e2);
2113 } else {
2114 /* return to different privilege level */
2115 #ifdef TARGET_X86_64
2116 if (shift == 2) {
2117 POPQ_RA(sp, new_esp, retaddr);
2118 POPQ_RA(sp, new_ss, retaddr);
2119 new_ss &= 0xffff;
2120 } else
2121 #endif
2123 if (shift == 1) {
2124 /* 32 bits */
2125 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2126 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2127 new_ss &= 0xffff;
2128 } else {
2129 /* 16 bits */
2130 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2131 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2134 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2135 new_ss, new_esp);
2136 if ((new_ss & 0xfffc) == 0) {
2137 #ifdef TARGET_X86_64
2138 /* NULL ss is allowed in long mode if cpl != 3 */
2139 /* XXX: test CS64? */
2140 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2141 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2142 0, 0xffffffff,
2143 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2144 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2145 DESC_W_MASK | DESC_A_MASK);
2146 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2147 } else
2148 #endif
2150 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2152 } else {
2153 if ((new_ss & 3) != rpl) {
2154 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2156 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2157 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2159 if (!(ss_e2 & DESC_S_MASK) ||
2160 (ss_e2 & DESC_CS_MASK) ||
2161 !(ss_e2 & DESC_W_MASK)) {
2162 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2164 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2165 if (dpl != rpl) {
2166 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2168 if (!(ss_e2 & DESC_P_MASK)) {
2169 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2171 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2172 get_seg_base(ss_e1, ss_e2),
2173 get_seg_limit(ss_e1, ss_e2),
2174 ss_e2);
2177 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2178 get_seg_base(e1, e2),
2179 get_seg_limit(e1, e2),
2180 e2);
2181 sp = new_esp;
2182 #ifdef TARGET_X86_64
2183 if (env->hflags & HF_CS64_MASK) {
2184 sp_mask = -1;
2185 } else
2186 #endif
2188 sp_mask = get_sp_mask(ss_e2);
2191 /* validate data segments */
2192 validate_seg(env, R_ES, rpl);
2193 validate_seg(env, R_DS, rpl);
2194 validate_seg(env, R_FS, rpl);
2195 validate_seg(env, R_GS, rpl);
2197 sp += addend;
2199 SET_ESP(sp, sp_mask);
2200 env->eip = new_eip;
2201 if (is_iret) {
2202 /* NOTE: 'cpl' is the _old_ CPL */
2203 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2204 if (cpl == 0) {
2205 eflags_mask |= IOPL_MASK;
2207 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2208 if (cpl <= iopl) {
2209 eflags_mask |= IF_MASK;
2211 if (shift == 0) {
2212 eflags_mask &= 0xffff;
2214 cpu_load_eflags(env, new_eflags, eflags_mask);
2216 return;
2218 return_to_vm86:
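/* IRET returning to vm86 (VM set in the popped EFLAGS): pop the
   remaining vm86 frame (ESP, SS, ES, DS, FS, GS), reload EFLAGS
   including VM, and load every segment register vm86-style. */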
2219 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2220 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2221 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2222 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2223 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2224 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2226 /* modify processor state */
2227 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2228 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2229 VIP_MASK);
2230 load_seg_vm(env, R_CS, new_cs & 0xffff);
2231 load_seg_vm(env, R_SS, new_ss & 0xffff);
2232 load_seg_vm(env, R_ES, new_es & 0xffff);
2233 load_seg_vm(env, R_DS, new_ds & 0xffff);
2234 load_seg_vm(env, R_FS, new_fs & 0xffff);
2235 load_seg_vm(env, R_GS, new_gs & 0xffff);
2237 env->eip = new_eip & 0xffff;
2238 env->regs[R_ESP] = new_esp;
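/* IRET in protected mode: with NT set, the return is a task switch to
   the TSS named by the back link at offset 0 of the current TSS
   (rejected in long mode); otherwise it falls through to the common
   far-return path with EFLAGS restore. */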
2241 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2243 int tss_selector, type;
2244 uint32_t e1, e2;
2246 /* specific case for TSS */
2247 if (env->eflags & NT_MASK) {
2248 #ifdef TARGET_X86_64
2249 if (env->hflags & HF_LMA_MASK) {
2250 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2252 #endif
2253 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2254 if (tss_selector & 4) {
2255 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2257 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2258 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2260 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2261 /* NOTE: masking with 0x17 keeps the S bit, so this checks both that
2261         the descriptor is a system segment and that the TSS is busy
2261         (286 or 386) */
2262 if (type != 3) {
2263 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2265 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2266 } else {
2267 helper_ret_protected(env, shift, 1, 0, GETPC());
2269 env->hflags2 &= ~HF2_NMI_MASK;
2272 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2274 helper_ret_protected(env, shift, 0, addend, GETPC());
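/* SYSENTER: requires SYSENTER_CS to be non-zero; loads flat ring-0
   CS/SS from SYSENTER_CS and SYSENTER_CS + 8, clears VM/IF/RF, and
   continues at SYSENTER_EIP with SYSENTER_ESP (CS is loaded as a 64-bit
   code segment in long mode). */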
2277 void helper_sysenter(CPUX86State *env)
2279 if (env->sysenter_cs == 0) {
2280 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2282 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2284 #ifdef TARGET_X86_64
2285 if (env->hflags & HF_LMA_MASK) {
2286 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2287 0, 0xffffffff,
2288 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2289 DESC_S_MASK |
2290 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2291 DESC_L_MASK);
2292 } else
2293 #endif
2295 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2296 0, 0xffffffff,
2297 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2298 DESC_S_MASK |
2299 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2301 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2302 0, 0xffffffff,
2303 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2304 DESC_S_MASK |
2305 DESC_W_MASK | DESC_A_MASK);
2306 env->regs[R_ESP] = env->sysenter_esp;
2307 env->eip = env->sysenter_eip;
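/* SYSEXIT: only valid at CPL 0 with SYSENTER_CS configured; returns to
   ring 3 with CS/SS built from SYSENTER_CS + 16/+24 (or +32/+40 for a
   64-bit return), taking the new ESP from ECX and EIP from EDX. */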
2310 void helper_sysexit(CPUX86State *env, int dflag)
2312 int cpl;
2314 cpl = env->hflags & HF_CPL_MASK;
2315 if (env->sysenter_cs == 0 || cpl != 0) {
2316 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2318 #ifdef TARGET_X86_64
2319 if (dflag == 2) {
2320 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2321 3, 0, 0xffffffff,
2322 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2323 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2324 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2325 DESC_L_MASK);
2326 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2327 3, 0, 0xffffffff,
2328 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2329 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2330 DESC_W_MASK | DESC_A_MASK);
2331 } else
2332 #endif
2334 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2335 3, 0, 0xffffffff,
2336 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2337 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2338 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2339 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2340 3, 0, 0xffffffff,
2341 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2342 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2343 DESC_W_MASK | DESC_A_MASK);
2345 env->regs[R_ESP] = env->regs[R_ECX];
2346 env->eip = env->regs[R_EDX];
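/* LSL: if the descriptor selected by 'selector1' is visible at the
   current CPL/RPL and of an allowed type, return its expanded limit and
   set ZF; otherwise clear ZF and return 0. */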
2349 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2351 unsigned int limit;
2352 uint32_t e1, e2, eflags, selector;
2353 int rpl, dpl, cpl, type;
2355 selector = selector1 & 0xffff;
2356 eflags = cpu_cc_compute_all(env, CC_OP);
2357 if ((selector & 0xfffc) == 0) {
2358 goto fail;
2360 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2361 goto fail;
2363 rpl = selector & 3;
2364 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2365 cpl = env->hflags & HF_CPL_MASK;
2366 if (e2 & DESC_S_MASK) {
2367 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2368 /* conforming */
2369 } else {
2370 if (dpl < cpl || dpl < rpl) {
2371 goto fail;
2374 } else {
2375 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2376 switch (type) {
2377 case 1:
2378 case 2:
2379 case 3:
2380 case 9:
2381 case 11:
2382 break;
2383 default:
2384 goto fail;
2386 if (dpl < cpl || dpl < rpl) {
2387 fail:
2388 CC_SRC = eflags & ~CC_Z;
2389 return 0;
2392 limit = get_seg_limit(e1, e2);
2393 CC_SRC = eflags | CC_Z;
2394 return limit;
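/* LAR: same visibility rules as LSL but with a wider set of accepted
   system types (it also accepts call gates and task gates); returns the
   access-rights bits (e2 & 0x00f0ff00) and sets ZF on success. */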
2397 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2399 uint32_t e1, e2, eflags, selector;
2400 int rpl, dpl, cpl, type;
2402 selector = selector1 & 0xffff;
2403 eflags = cpu_cc_compute_all(env, CC_OP);
2404 if ((selector & 0xfffc) == 0) {
2405 goto fail;
2407 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2408 goto fail;
2410 rpl = selector & 3;
2411 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2412 cpl = env->hflags & HF_CPL_MASK;
2413 if (e2 & DESC_S_MASK) {
2414 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2415 /* conforming */
2416 } else {
2417 if (dpl < cpl || dpl < rpl) {
2418 goto fail;
2421 } else {
2422 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2423 switch (type) {
2424 case 1:
2425 case 2:
2426 case 3:
2427 case 4:
2428 case 5:
2429 case 9:
2430 case 11:
2431 case 12:
2432 break;
2433 default:
2434 goto fail;
2436 if (dpl < cpl || dpl < rpl) {
2437 fail:
2438 CC_SRC = eflags & ~CC_Z;
2439 return 0;
2442 CC_SRC = eflags | CC_Z;
2443 return e2 & 0x00f0ff00;
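/* VERR: ZF is set if the segment is readable from the current CPL/RPL;
   code segments must have the readable bit, and non-conforming segments
   must also pass the DPL check. */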
2446 void helper_verr(CPUX86State *env, target_ulong selector1)
2448 uint32_t e1, e2, eflags, selector;
2449 int rpl, dpl, cpl;
2451 selector = selector1 & 0xffff;
2452 eflags = cpu_cc_compute_all(env, CC_OP);
2453 if ((selector & 0xfffc) == 0) {
2454 goto fail;
2456 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2457 goto fail;
2459 if (!(e2 & DESC_S_MASK)) {
2460 goto fail;
2462 rpl = selector & 3;
2463 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2464 cpl = env->hflags & HF_CPL_MASK;
2465 if (e2 & DESC_CS_MASK) {
2466 if (!(e2 & DESC_R_MASK)) {
2467 goto fail;
2469 if (!(e2 & DESC_C_MASK)) {
2470 if (dpl < cpl || dpl < rpl) {
2471 goto fail;
2474 } else {
2475 if (dpl < cpl || dpl < rpl) {
2476 fail:
2477 CC_SRC = eflags & ~CC_Z;
2478 return;
2481 CC_SRC = eflags | CC_Z;
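/* VERW: ZF is set only for a writable data segment whose DPL allows
   access from the current CPL/RPL; code segments always fail. */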
2484 void helper_verw(CPUX86State *env, target_ulong selector1)
2486 uint32_t e1, e2, eflags, selector;
2487 int rpl, dpl, cpl;
2489 selector = selector1 & 0xffff;
2490 eflags = cpu_cc_compute_all(env, CC_OP);
2491 if ((selector & 0xfffc) == 0) {
2492 goto fail;
2494 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2495 goto fail;
2497 if (!(e2 & DESC_S_MASK)) {
2498 goto fail;
2500 rpl = selector & 3;
2501 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2502 cpl = env->hflags & HF_CPL_MASK;
2503 if (e2 & DESC_CS_MASK) {
2504 goto fail;
2505 } else {
2506 if (dpl < cpl || dpl < rpl) {
2507 goto fail;
2509 if (!(e2 & DESC_W_MASK)) {
2510 fail:
2511 CC_SRC = eflags & ~CC_Z;
2512 return;
2515 CC_SRC = eflags | CC_Z;
2518 #if defined(CONFIG_USER_ONLY)
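/* user-mode emulation: in real or vm86 mode a selector load just sets
   base = selector << 4 with a 64 KiB limit; in protected mode the full
   helper_load_seg() checks apply. */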
2519 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2521 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2522 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2523 selector &= 0xffff;
2524 cpu_x86_load_seg_cache(env, seg_reg, selector,
2525 (selector << 4), 0xffff,
2526 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2527 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2528 } else {
2529 helper_load_seg(env, seg_reg, selector);
2532 #endif
2534 /* check if Port I/O is allowed in TSS */
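/* The I/O bitmap offset is read from word 0x66 of the TSS; the 'size'
   bits starting at bit (addr & 7) of the word at bitmap + (addr >> 3)
   must all be clear, and the two bytes read must fit inside the TSS
   limit, otherwise #GP(0) is raised. */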
2535 static inline void check_io(CPUX86State *env, int addr, int size,
2536 uintptr_t retaddr)
2538 int io_offset, val, mask;
2540 /* the TSS must be present, of 32-bit type, and at least 104 bytes long */
2541 if (!(env->tr.flags & DESC_P_MASK) ||
2542 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2543 env->tr.limit < 103) {
2544 goto fail;
2546 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2547 io_offset += (addr >> 3);
2548 /* Note: the check reads two bytes, so both must lie within the TSS limit */
2549 if ((io_offset + 1) > env->tr.limit) {
2550 goto fail;
2552 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2553 val >>= (addr & 7);
2554 mask = (1 << size) - 1;
2555 /* all bits must be zero to allow the I/O */
2556 if ((val & mask) != 0) {
2557 fail:
2558 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
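/* byte/word/long variants of the TSS I/O permission check */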
2562 void helper_check_iob(CPUX86State *env, uint32_t t0)
2564 check_io(env, t0, 1, GETPC());
2567 void helper_check_iow(CPUX86State *env, uint32_t t0)
2569 check_io(env, t0, 2, GETPC());
2572 void helper_check_iol(CPUX86State *env, uint32_t t0)
2574 check_io(env, t0, 4, GETPC());