target-i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
39 #define DATA_SIZE 1
40 #include "exec/cpu_ldst_useronly_template.h"
42 #define DATA_SIZE 2
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 4
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 8
49 #include "exec/cpu_ldst_useronly_template.h"
50 #undef MEMSUFFIX
51 #else
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
54 #define DATA_SIZE 1
55 #include "exec/cpu_ldst_template.h"
57 #define DATA_SIZE 2
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 4
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 8
64 #include "exec/cpu_ldst_template.h"
65 #undef CPU_MMU_INDEX
66 #undef MEMSUFFIX
67 #endif
69 /* return non-zero on error */
70 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
71 uint32_t *e2_ptr, int selector,
72 uintptr_t retaddr)
74 SegmentCache *dt;
75 int index;
76 target_ulong ptr;
78 if (selector & 0x4) {
79 dt = &env->ldt;
80 } else {
81 dt = &env->gdt;
83 index = selector & ~7;
84 if ((index + 7) > dt->limit) {
85 return -1;
87 ptr = dt->base + index;
88 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
89 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
90 return 0;
93 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
94 uint32_t *e2_ptr, int selector)
96 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
99 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
101 unsigned int limit;
103 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
104 if (e2 & DESC_G_MASK) {
105 limit = (limit << 12) | 0xfff;
107 return limit;
110 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
112 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
115 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
116 uint32_t e2)
118 sc->base = get_seg_base(e1, e2);
119 sc->limit = get_seg_limit(e1, e2);
120 sc->flags = e2;
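/* Worked example for the two decoders above: the common flat 32-bit code
   descriptor 0x00cf9a000000ffff splits into e1 = 0x0000ffff and
   e2 = 0x00cf9a00; get_seg_base() yields 0, and get_seg_limit() expands
   the raw 0xfffff page-granular limit through the G bit to 0xffffffff. */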
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
126 selector &= 0xffff;
128 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
129 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
130 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
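/* In vm86 mode there are no descriptors, so the cache is filled directly:
   base = selector << 4, a 64 KiB limit, and flags describing a present,
   writable, accessed data segment with DPL 3. */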
133 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
134 uint32_t *esp_ptr, int dpl,
135 uintptr_t retaddr)
137 X86CPU *cpu = x86_env_get_cpu(env);
138 int type, index, shift;
140 #if 0
142 int i;
143 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
144 for (i = 0; i < env->tr.limit; i++) {
145 printf("%02x ", env->tr.base[i]);
146 if ((i & 7) == 7) {
147 printf("\n");
150 printf("\n");
152 #endif
154 if (!(env->tr.flags & DESC_P_MASK)) {
155 cpu_abort(CPU(cpu), "invalid tss");
157 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
158 if ((type & 7) != 1) {
159 cpu_abort(CPU(cpu), "invalid tss type");
161 shift = type >> 3;
162 index = (dpl * 4 + 2) << shift;
163 if (index + (4 << shift) - 1 > env->tr.limit) {
164 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
166 if (shift == 0) {
167 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
168 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
169 } else {
170 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
175 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
176 uintptr_t retaddr)
178 uint32_t e1, e2;
179 int rpl, dpl;
181 if ((selector & 0xfffc) != 0) {
182 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
183 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
185 if (!(e2 & DESC_S_MASK)) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 rpl = selector & 3;
189 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
190 if (seg_reg == R_CS) {
191 if (!(e2 & DESC_CS_MASK)) {
192 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
194 if (dpl != rpl) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 } else if (seg_reg == R_SS) {
198 /* SS must be writable data */
199 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
200 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
202 if (dpl != cpl || dpl != rpl) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 } else {
206 /* not readable code */
207 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
208 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
210 /* if data or non-conforming code, check the rights */
211 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
212 if (dpl < cpl || dpl < rpl) {
213 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
217 if (!(e2 & DESC_P_MASK)) {
218 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
220 cpu_x86_load_seg_cache(env, seg_reg, selector,
221 get_seg_base(e1, e2),
222 get_seg_limit(e1, e2),
223 e2);
224 } else {
225 if (seg_reg == R_SS || seg_reg == R_CS) {
226 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231 #define SWITCH_TSS_JMP 0
232 #define SWITCH_TSS_IRET 1
233 #define SWITCH_TSS_CALL 2
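/* The three sources differ in the bookkeeping done below: JMP and IRET
   clear the busy bit of the outgoing TSS, IRET also clears NT in the
   EFLAGS image written back to it, CALL stores the old TR selector as
   the back link of the new TSS and sets NT in the flags loaded for the
   new task, and JMP and CALL mark the incoming TSS busy. */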
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State *env, int tss_selector,
237 uint32_t e1, uint32_t e2, int source,
238 uint32_t next_eip, uintptr_t retaddr)
240 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
241 target_ulong tss_base;
242 uint32_t new_regs[8], new_segs[6];
243 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
244 uint32_t old_eflags, eflags_mask;
245 SegmentCache *dt;
246 int index;
247 target_ulong ptr;
249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
251 source);
253 /* if it is a task gate, we read and load the TSS segment */
254 if (type == 5) {
255 if (!(e2 & DESC_P_MASK)) {
256 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
258 tss_selector = e1 >> 16;
259 if (tss_selector & 4) {
260 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
262 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
263 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
265 if (e2 & DESC_S_MASK) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
269 if ((type & 7) != 1) {
270 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
274 if (!(e2 & DESC_P_MASK)) {
275 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
278 if (type & 8) {
279 tss_limit_max = 103;
280 } else {
281 tss_limit_max = 43;
283 tss_limit = get_seg_limit(e1, e2);
284 tss_base = get_seg_base(e1, e2);
285 if ((tss_selector & 4) != 0 ||
286 tss_limit < tss_limit_max) {
287 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
289 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
290 if (old_type & 8) {
291 old_tss_limit_max = 103;
292 } else {
293 old_tss_limit_max = 43;
296 /* read all the registers from the new TSS */
297 if (type & 8) {
298 /* 32 bit */
299 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
300 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
301 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
302 for (i = 0; i < 8; i++) {
303 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
304 retaddr);
306 for (i = 0; i < 6; i++) {
307 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
308 retaddr);
310 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
311 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
312 } else {
313 /* 16 bit */
314 new_cr3 = 0;
315 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
316 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
317 for (i = 0; i < 8; i++) {
318 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
319 retaddr) | 0xffff0000;
321 for (i = 0; i < 4; i++) {
322 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
323 retaddr);
325 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
326 new_segs[R_FS] = 0;
327 new_segs[R_GS] = 0;
328 new_trap = 0;
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
333 (void)new_trap;
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses beforehand */
337 /* XXX: it can still fail in some cases, so a bigger hack is
338 necessary to validate the TLB after having done the accesses */
340 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
341 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
342 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
345 /* clear busy bit (it is restartable) */
346 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
347 target_ulong ptr;
348 uint32_t e2;
350 ptr = env->gdt.base + (env->tr.selector & ~7);
351 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
352 e2 &= ~DESC_TSS_BUSY_MASK;
353 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
355 old_eflags = cpu_compute_eflags(env);
356 if (source == SWITCH_TSS_IRET) {
357 old_eflags &= ~NT_MASK;
360 /* save the current state in the old TSS */
361 if (type & 8) {
362 /* 32 bit */
363 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
364 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
373 for (i = 0; i < 6; i++) {
374 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
375 env->segs[i].selector, retaddr);
377 } else {
378 /* 16 bit */
379 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
380 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
389 for (i = 0; i < 4; i++) {
390 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
391 env->segs[i].selector, retaddr);
395 /* now if an exception occurs, it will occur in the next task
396 context */
398 if (source == SWITCH_TSS_CALL) {
399 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
400 new_eflags |= NT_MASK;
403 /* set busy bit */
404 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
405 target_ulong ptr;
406 uint32_t e2;
408 ptr = env->gdt.base + (tss_selector & ~7);
409 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
410 e2 |= DESC_TSS_BUSY_MASK;
411 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env->cr[0] |= CR0_TS_MASK;
417 env->hflags |= HF_TS_MASK;
418 env->tr.selector = tss_selector;
419 env->tr.base = tss_base;
420 env->tr.limit = tss_limit;
421 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
423 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
424 cpu_x86_update_cr3(env, new_cr3);
427 /* load all registers without raising exceptions, then reload them
428 with checks that may raise exceptions */
429 env->eip = new_eip;
430 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
431 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
432 if (!(type & 8)) {
433 eflags_mask &= 0xffff;
435 cpu_load_eflags(env, new_eflags, eflags_mask);
436 /* XXX: what to do in 16 bit case? */
437 env->regs[R_EAX] = new_regs[0];
438 env->regs[R_ECX] = new_regs[1];
439 env->regs[R_EDX] = new_regs[2];
440 env->regs[R_EBX] = new_regs[3];
441 env->regs[R_ESP] = new_regs[4];
442 env->regs[R_EBP] = new_regs[5];
443 env->regs[R_ESI] = new_regs[6];
444 env->regs[R_EDI] = new_regs[7];
445 if (new_eflags & VM_MASK) {
446 for (i = 0; i < 6; i++) {
447 load_seg_vm(env, i, new_segs[i]);
449 } else {
450 /* first load just the selectors, as the rest may trigger exceptions */
451 for (i = 0; i < 6; i++) {
452 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
456 env->ldt.selector = new_ldt & ~4;
457 env->ldt.base = 0;
458 env->ldt.limit = 0;
459 env->ldt.flags = 0;
461 /* load the LDT */
462 if (new_ldt & 4) {
463 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
466 if ((new_ldt & 0xfffc) != 0) {
467 dt = &env->gdt;
468 index = new_ldt & ~7;
469 if ((index + 7) > dt->limit) {
470 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
472 ptr = dt->base + index;
473 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
474 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
475 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
476 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
478 if (!(e2 & DESC_P_MASK)) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 int cpl = new_segs[R_CS] & 3;
487 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
488 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
489 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
490 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
491 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
492 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip > env->segs[R_CS].limit) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
504 for (i = 0; i < DR7_MAX_BP; i++) {
505 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
506 !hw_global_breakpoint_enabled(env->dr[7], i)) {
507 hw_breakpoint_remove(env, i);
510 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
512 #endif
515 static void switch_tss(CPUX86State *env, int tss_selector,
516 uint32_t e1, uint32_t e2, int source,
517 uint32_t next_eip)
519 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
522 static inline unsigned int get_sp_mask(unsigned int e2)
524 if (e2 & DESC_B_MASK) {
525 return 0xffffffff;
526 } else {
527 return 0xffff;
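/* The B/D bit of the stack-segment descriptor selects a 32-bit ESP
   (mask 0xffffffff) or a 16-bit SP (mask 0xffff); callers apply the mask
   whenever the stack pointer is used to form a stack address. */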
531 static int exception_has_error_code(int intno)
533 switch (intno) {
534 case 8:
535 case 10:
536 case 11:
537 case 12:
538 case 13:
539 case 14:
540 case 17:
541 return 1;
543 return 0;
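/* Of the vectors above, #DF(8), #TS(10), #NP(11), #SS(12), #GP(13),
   #PF(14) and #AC(17) are the exceptions that push an error code. */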
546 #ifdef TARGET_X86_64
547 #define SET_ESP(val, sp_mask) \
548 do { \
549 if ((sp_mask) == 0xffff) { \
550 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
551 ((val) & 0xffff); \
552 } else if ((sp_mask) == 0xffffffffLL) { \
553 env->regs[R_ESP] = (uint32_t)(val); \
554 } else { \
555 env->regs[R_ESP] = (val); \
557 } while (0)
558 #else
559 #define SET_ESP(val, sp_mask) \
560 do { \
561 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
562 ((val) & (sp_mask)); \
563 } while (0)
564 #endif
566 /* on 64-bit builds this addition can overflow, so this segment addition
567 * macro is used to trim the value to 32 bits whenever needed */
568 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
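/* Example of the overflow being trimmed: with a 64-bit target_ulong, a
   segment base of 0xfffff000 plus a masked offset of 0x2000 would give
   0x100001000, while a 32-bit CPU wraps the linear address to 0x1000;
   the uint32_t cast reproduces that wrap. */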
570 /* XXX: add an is_user flag to have proper security support */
571 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
573 sp -= 2; \
574 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
577 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
579 sp -= 4; \
580 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
583 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
585 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
586 sp += 2; \
589 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
591 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
592 sp += 4; \
595 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
596 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
597 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
598 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
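/* These macros mirror the hardware stack operations: adjust sp by the
   operand size and access memory at ssp + (sp & sp_mask).  The _RA
   variants carry the host return address so that a fault raised inside
   the access is unwound to the guest instruction that caused it. */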
600 /* protected mode interrupt */
601 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
602 int error_code, unsigned int next_eip,
603 int is_hw)
605 SegmentCache *dt;
606 target_ulong ptr, ssp;
607 int type, dpl, selector, ss_dpl, cpl;
608 int has_error_code, new_stack, shift;
609 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
610 uint32_t old_eip, sp_mask;
611 int vm86 = env->eflags & VM_MASK;
613 has_error_code = 0;
614 if (!is_int && !is_hw) {
615 has_error_code = exception_has_error_code(intno);
617 if (is_int) {
618 old_eip = next_eip;
619 } else {
620 old_eip = env->eip;
623 dt = &env->idt;
624 if (intno * 8 + 7 > dt->limit) {
625 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
627 ptr = dt->base + intno * 8;
628 e1 = cpu_ldl_kernel(env, ptr);
629 e2 = cpu_ldl_kernel(env, ptr + 4);
630 /* check gate type */
631 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
632 switch (type) {
633 case 5: /* task gate */
634 /* must do that check here to return the correct error code */
635 if (!(e2 & DESC_P_MASK)) {
636 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
638 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
639 if (has_error_code) {
640 int type;
641 uint32_t mask;
643 /* push the error code */
644 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
645 shift = type >> 3;
646 if (env->segs[R_SS].flags & DESC_B_MASK) {
647 mask = 0xffffffff;
648 } else {
649 mask = 0xffff;
651 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
652 ssp = env->segs[R_SS].base + esp;
653 if (shift) {
654 cpu_stl_kernel(env, ssp, error_code);
655 } else {
656 cpu_stw_kernel(env, ssp, error_code);
658 SET_ESP(esp, mask);
660 return;
661 case 6: /* 286 interrupt gate */
662 case 7: /* 286 trap gate */
663 case 14: /* 386 interrupt gate */
664 case 15: /* 386 trap gate */
665 break;
666 default:
667 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
668 break;
670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
671 cpl = env->hflags & HF_CPL_MASK;
672 /* check privilege if software int */
673 if (is_int && dpl < cpl) {
674 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK)) {
678 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
680 selector = e1 >> 16;
681 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
682 if ((selector & 0xfffc) == 0) {
683 raise_exception_err(env, EXCP0D_GPF, 0);
685 if (load_segment(env, &e1, &e2, selector) != 0) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
692 if (dpl > cpl) {
693 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
695 if (!(e2 & DESC_P_MASK)) {
696 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
698 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
699 /* to inner privilege */
700 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
701 if ((ss & 0xfffc) == 0) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if ((ss & 3) != dpl) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
708 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
711 if (ss_dpl != dpl) {
712 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
714 if (!(ss_e2 & DESC_S_MASK) ||
715 (ss_e2 & DESC_CS_MASK) ||
716 !(ss_e2 & DESC_W_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 if (!(ss_e2 & DESC_P_MASK)) {
720 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
722 new_stack = 1;
723 sp_mask = get_sp_mask(ss_e2);
724 ssp = get_seg_base(ss_e1, ss_e2);
725 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
726 /* to same privilege */
727 if (vm86) {
728 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
730 new_stack = 0;
731 sp_mask = get_sp_mask(env->segs[R_SS].flags);
732 ssp = env->segs[R_SS].base;
733 esp = env->regs[R_ESP];
734 dpl = cpl;
735 } else {
736 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
737 new_stack = 0; /* avoid warning */
738 sp_mask = 0; /* avoid warning */
739 ssp = 0; /* avoid warning */
740 esp = 0; /* avoid warning */
743 shift = type >> 3;
745 #if 0
746 /* XXX: check that enough room is available */
747 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
748 if (vm86) {
749 push_size += 8;
751 push_size <<= shift;
752 #endif
753 if (shift == 1) {
754 if (new_stack) {
755 if (vm86) {
756 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
759 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
761 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
762 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
764 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
765 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
766 PUSHL(ssp, esp, sp_mask, old_eip);
767 if (has_error_code) {
768 PUSHL(ssp, esp, sp_mask, error_code);
770 } else {
771 if (new_stack) {
772 if (vm86) {
773 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
776 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
778 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
779 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
781 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
782 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
783 PUSHW(ssp, esp, sp_mask, old_eip);
784 if (has_error_code) {
785 PUSHW(ssp, esp, sp_mask, error_code);
789 /* interrupt gates clear the IF mask */
790 if ((type & 1) == 0) {
791 env->eflags &= ~IF_MASK;
793 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
795 if (new_stack) {
796 if (vm86) {
797 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
802 ss = (ss & ~3) | dpl;
803 cpu_x86_load_seg_cache(env, R_SS, ss,
804 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
806 SET_ESP(esp, sp_mask);
808 selector = (selector & ~3) | dpl;
809 cpu_x86_load_seg_cache(env, R_CS, selector,
810 get_seg_base(e1, e2),
811 get_seg_limit(e1, e2),
812 e2);
813 env->eip = offset;
816 #ifdef TARGET_X86_64
818 #define PUSHQ_RA(sp, val, ra) \
820 sp -= 8; \
821 cpu_stq_kernel_ra(env, sp, (val), ra); \
824 #define POPQ_RA(sp, val, ra) \
826 val = cpu_ldq_kernel_ra(env, sp, ra); \
827 sp += 8; \
830 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
831 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
833 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
835 X86CPU *cpu = x86_env_get_cpu(env);
836 int index;
838 #if 0
839 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
840 env->tr.base, env->tr.limit);
841 #endif
843 if (!(env->tr.flags & DESC_P_MASK)) {
844 cpu_abort(CPU(cpu), "invalid tss");
846 index = 8 * level + 4;
847 if ((index + 7) > env->tr.limit) {
848 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
850 return cpu_ldq_kernel(env, env->tr.base + index);
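/* In the 64-bit TSS, RSP0-RSP2 sit at offsets 4, 12 and 20 and IST1-IST7
   at offsets 36..84, so "8 * level + 4" addresses RSPn for levels 0-2 and
   ISTn for level n + 3, matching the "ist ? ist + 3 : dpl" argument used
   by do_interrupt64(). */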
853 /* 64 bit interrupt */
854 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
855 int error_code, target_ulong next_eip, int is_hw)
857 SegmentCache *dt;
858 target_ulong ptr;
859 int type, dpl, selector, cpl, ist;
860 int has_error_code, new_stack;
861 uint32_t e1, e2, e3, ss;
862 target_ulong old_eip, esp, offset;
864 has_error_code = 0;
865 if (!is_int && !is_hw) {
866 has_error_code = exception_has_error_code(intno);
868 if (is_int) {
869 old_eip = next_eip;
870 } else {
871 old_eip = env->eip;
874 dt = &env->idt;
875 if (intno * 16 + 15 > dt->limit) {
876 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = cpu_ldl_kernel(env, ptr);
880 e2 = cpu_ldl_kernel(env, ptr + 4);
881 e3 = cpu_ldl_kernel(env, ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
884 switch (type) {
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
887 break;
888 default:
889 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
890 break;
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privilege if software int */
895 if (is_int && dpl < cpl) {
896 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
898 /* check valid bit */
899 if (!(e2 & DESC_P_MASK)) {
900 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
902 selector = e1 >> 16;
903 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 ist = e2 & 7;
905 if ((selector & 0xfffc) == 0) {
906 raise_exception_err(env, EXCP0D_GPF, 0);
909 if (load_segment(env, &e1, &e2, selector) != 0) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
913 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
916 if (dpl > cpl) {
917 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
919 if (!(e2 & DESC_P_MASK)) {
920 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
922 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
923 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
925 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
926 /* to inner privilege */
927 new_stack = 1;
928 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
929 ss = 0;
930 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
931 /* to same privilege */
932 if (env->eflags & VM_MASK) {
933 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
935 new_stack = 0;
936 esp = env->regs[R_ESP];
937 dpl = cpl;
938 } else {
939 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
940 new_stack = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
943 esp &= ~0xfLL; /* align stack */
945 PUSHQ(esp, env->segs[R_SS].selector);
946 PUSHQ(esp, env->regs[R_ESP]);
947 PUSHQ(esp, cpu_compute_eflags(env));
948 PUSHQ(esp, env->segs[R_CS].selector);
949 PUSHQ(esp, old_eip);
950 if (has_error_code) {
951 PUSHQ(esp, error_code);
954 /* interrupt gates clear the IF mask */
955 if ((type & 1) == 0) {
956 env->eflags &= ~IF_MASK;
958 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
960 if (new_stack) {
961 ss = 0 | dpl;
962 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
964 env->regs[R_ESP] = esp;
966 selector = (selector & ~3) | dpl;
967 cpu_x86_load_seg_cache(env, R_CS, selector,
968 get_seg_base(e1, e2),
969 get_seg_limit(e1, e2),
970 e2);
971 env->eip = offset;
973 #endif
975 #ifdef TARGET_X86_64
976 #if defined(CONFIG_USER_ONLY)
977 void QEMU_NORETURN helper_syscall(CPUX86State *env, int next_eip_addend)
979 CPUState *cs = CPU(x86_env_get_cpu(env));
981 cs->exception_index = EXCP_SYSCALL;
982 env->exception_next_eip = env->eip + next_eip_addend;
983 cpu_loop_exit(cs);
986 void QEMU_NORETURN helper_vsyscall(CPUX86State *env)
988 CPUState *cs = CPU(x86_env_get_cpu(env));
989 cs->exception_index = EXCP_VSYSCALL;
990 cpu_loop_exit(cs);
992 #else
993 void helper_syscall(CPUX86State *env, int next_eip_addend)
995 int selector;
997 if (!(env->efer & MSR_EFER_SCE)) {
998 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1000 selector = (env->star >> 32) & 0xffff;
1001 if (env->hflags & HF_LMA_MASK) {
1002 int code64;
1004 env->regs[R_ECX] = env->eip + next_eip_addend;
1005 env->regs[11] = cpu_compute_eflags(env);
1007 code64 = env->hflags & HF_CS64_MASK;
1009 env->eflags &= ~env->fmask;
1010 cpu_load_eflags(env, env->eflags, 0);
1011 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1012 0, 0xffffffff,
1013 DESC_G_MASK | DESC_P_MASK |
1014 DESC_S_MASK |
1015 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1016 DESC_L_MASK);
1017 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1018 0, 0xffffffff,
1019 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1020 DESC_S_MASK |
1021 DESC_W_MASK | DESC_A_MASK);
1022 if (code64) {
1023 env->eip = env->lstar;
1024 } else {
1025 env->eip = env->cstar;
1027 } else {
1028 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1030 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1031 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1032 0, 0xffffffff,
1033 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1034 DESC_S_MASK |
1035 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1036 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1037 0, 0xffffffff,
1038 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1039 DESC_S_MASK |
1040 DESC_W_MASK | DESC_A_MASK);
1041 env->eip = (uint32_t)env->star;
1044 #endif
1045 #endif
1047 #ifdef TARGET_X86_64
1048 void helper_sysret(CPUX86State *env, int dflag)
1050 int cpl, selector;
1052 if (!(env->efer & MSR_EFER_SCE)) {
1053 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1055 cpl = env->hflags & HF_CPL_MASK;
1056 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1057 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1059 selector = (env->star >> 48) & 0xffff;
1060 if (env->hflags & HF_LMA_MASK) {
1061 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1062 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1063 NT_MASK);
1064 if (dflag == 2) {
1065 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1066 0, 0xffffffff,
1067 DESC_G_MASK | DESC_P_MASK |
1068 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1069 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1070 DESC_L_MASK);
1071 env->eip = env->regs[R_ECX];
1072 } else {
1073 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1074 0, 0xffffffff,
1075 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1076 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1077 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1078 env->eip = (uint32_t)env->regs[R_ECX];
1080 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1081 0, 0xffffffff,
1082 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1083 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1084 DESC_W_MASK | DESC_A_MASK);
1085 } else {
1086 env->eflags |= IF_MASK;
1087 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1088 0, 0xffffffff,
1089 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1090 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1091 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1092 env->eip = (uint32_t)env->regs[R_ECX];
1093 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1094 0, 0xffffffff,
1095 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1096 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1097 DESC_W_MASK | DESC_A_MASK);
1100 #endif
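/* Both helper_syscall() and helper_sysret() take their selectors from the
   STAR MSR: SYSCALL loads CS from STAR[47:32] and SS from that value plus
   8, while SYSRET uses STAR[63:48] (plus 16 for the 64-bit CS) with the
   RPL forced to 3. */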
1102 /* real mode interrupt */
1103 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1104 int error_code, unsigned int next_eip)
1106 SegmentCache *dt;
1107 target_ulong ptr, ssp;
1108 int selector;
1109 uint32_t offset, esp;
1110 uint32_t old_cs, old_eip;
1112 /* real mode (simpler!) */
1113 dt = &env->idt;
1114 if (intno * 4 + 3 > dt->limit) {
1115 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1117 ptr = dt->base + intno * 4;
1118 offset = cpu_lduw_kernel(env, ptr);
1119 selector = cpu_lduw_kernel(env, ptr + 2);
1120 esp = env->regs[R_ESP];
1121 ssp = env->segs[R_SS].base;
1122 if (is_int) {
1123 old_eip = next_eip;
1124 } else {
1125 old_eip = env->eip;
1127 old_cs = env->segs[R_CS].selector;
1128 /* XXX: use SS segment size? */
1129 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1130 PUSHW(ssp, esp, 0xffff, old_cs);
1131 PUSHW(ssp, esp, 0xffff, old_eip);
1133 /* update processor state */
1134 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1135 env->eip = offset;
1136 env->segs[R_CS].selector = selector;
1137 env->segs[R_CS].base = (selector << 4);
1138 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1141 #if defined(CONFIG_USER_ONLY)
1142 /* fake user mode interrupt */
1143 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1144 int error_code, target_ulong next_eip)
1146 SegmentCache *dt;
1147 target_ulong ptr;
1148 int dpl, cpl, shift;
1149 uint32_t e2;
1151 dt = &env->idt;
1152 if (env->hflags & HF_LMA_MASK) {
1153 shift = 4;
1154 } else {
1155 shift = 3;
1157 ptr = dt->base + (intno << shift);
1158 e2 = cpu_ldl_kernel(env, ptr + 4);
1160 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1161 cpl = env->hflags & HF_CPL_MASK;
1162 /* check privilege if software int */
1163 if (is_int && dpl < cpl) {
1164 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1167 /* Since we emulate only user space, we cannot do more than
1168 exit the emulation with the suitable exception and error
1169 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1170 if (is_int || intno == EXCP_SYSCALL) {
1171 env->eip = next_eip;
1175 #else
1177 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1178 int error_code, int is_hw, int rm)
1180 CPUState *cs = CPU(x86_env_get_cpu(env));
1181 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1182 control.event_inj));
1184 if (!(event_inj & SVM_EVTINJ_VALID)) {
1185 int type;
1187 if (is_int) {
1188 type = SVM_EVTINJ_TYPE_SOFT;
1189 } else {
1190 type = SVM_EVTINJ_TYPE_EXEPT;
1192 event_inj = intno | type | SVM_EVTINJ_VALID;
1193 if (!rm && exception_has_error_code(intno)) {
1194 event_inj |= SVM_EVTINJ_VALID_ERR;
1195 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1196 control.event_inj_err),
1197 error_code);
1199 x86_stl_phys(cs,
1200 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1201 event_inj);
1204 #endif
1207 * Begin execution of an interrupt. is_int is TRUE if coming from
1208 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1209 * instruction. It is only relevant if is_int is TRUE.
1211 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1212 int error_code, target_ulong next_eip, int is_hw)
1214 CPUX86State *env = &cpu->env;
1216 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1217 if ((env->cr[0] & CR0_PE_MASK)) {
1218 static int count;
1220 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1221 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1222 count, intno, error_code, is_int,
1223 env->hflags & HF_CPL_MASK,
1224 env->segs[R_CS].selector, env->eip,
1225 (int)env->segs[R_CS].base + env->eip,
1226 env->segs[R_SS].selector, env->regs[R_ESP]);
1227 if (intno == 0x0e) {
1228 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1229 } else {
1230 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1232 qemu_log("\n");
1233 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1234 #if 0
1236 int i;
1237 target_ulong ptr;
1239 qemu_log(" code=");
1240 ptr = env->segs[R_CS].base + env->eip;
1241 for (i = 0; i < 16; i++) {
1242 qemu_log(" %02x", ldub(ptr + i));
1244 qemu_log("\n");
1246 #endif
1247 count++;
1250 if (env->cr[0] & CR0_PE_MASK) {
1251 #if !defined(CONFIG_USER_ONLY)
1252 if (env->hflags & HF_SVMI_MASK) {
1253 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1255 #endif
1256 #ifdef TARGET_X86_64
1257 if (env->hflags & HF_LMA_MASK) {
1258 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1259 } else
1260 #endif
1262 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1263 is_hw);
1265 } else {
1266 #if !defined(CONFIG_USER_ONLY)
1267 if (env->hflags & HF_SVMI_MASK) {
1268 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1270 #endif
1271 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1274 #if !defined(CONFIG_USER_ONLY)
1275 if (env->hflags & HF_SVMI_MASK) {
1276 CPUState *cs = CPU(cpu);
1277 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1278 offsetof(struct vmcb,
1279 control.event_inj));
1281 x86_stl_phys(cs,
1282 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1283 event_inj & ~SVM_EVTINJ_VALID);
1285 #endif
1288 void x86_cpu_do_interrupt(CPUState *cs)
1290 X86CPU *cpu = X86_CPU(cs);
1291 CPUX86State *env = &cpu->env;
1293 #if defined(CONFIG_USER_ONLY)
1294 /* if user mode only, we simulate a fake exception
1295 which will be handled outside the cpu execution
1296 loop */
1297 do_interrupt_user(env, cs->exception_index,
1298 env->exception_is_int,
1299 env->error_code,
1300 env->exception_next_eip);
1301 /* successfully delivered */
1302 env->old_exception = -1;
1303 #else
1304 /* simulate a real cpu exception. On i386, it can
1305 trigger new exceptions, but we do not handle
1306 double or triple faults yet. */
1307 do_interrupt_all(cpu, cs->exception_index,
1308 env->exception_is_int,
1309 env->error_code,
1310 env->exception_next_eip, 0);
1311 /* successfully delivered */
1312 env->old_exception = -1;
1313 #endif
1316 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1318 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1321 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1323 X86CPU *cpu = X86_CPU(cs);
1324 CPUX86State *env = &cpu->env;
1325 bool ret = false;
1327 #if !defined(CONFIG_USER_ONLY)
1328 if (interrupt_request & CPU_INTERRUPT_POLL) {
1329 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1330 apic_poll_irq(cpu->apic_state);
1332 #endif
1333 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1334 do_cpu_sipi(cpu);
1335 } else if (env->hflags2 & HF2_GIF_MASK) {
1336 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1337 !(env->hflags & HF_SMM_MASK)) {
1338 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1339 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1340 do_smm_enter(cpu);
1341 ret = true;
1342 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1343 !(env->hflags2 & HF2_NMI_MASK)) {
1344 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1345 env->hflags2 |= HF2_NMI_MASK;
1346 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1347 ret = true;
1348 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1349 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1350 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1351 ret = true;
1352 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1353 (((env->hflags2 & HF2_VINTR_MASK) &&
1354 (env->hflags2 & HF2_HIF_MASK)) ||
1355 (!(env->hflags2 & HF2_VINTR_MASK) &&
1356 (env->eflags & IF_MASK &&
1357 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1358 int intno;
1359 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1360 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1361 CPU_INTERRUPT_VIRQ);
1362 intno = cpu_get_pic_interrupt(env);
1363 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1364 "Servicing hardware INT=0x%02x\n", intno);
1365 do_interrupt_x86_hardirq(env, intno, 1);
1366 /* ensure that no TB jump will be modified as
1367 the program flow was changed */
1368 ret = true;
1369 #if !defined(CONFIG_USER_ONLY)
1370 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1371 (env->eflags & IF_MASK) &&
1372 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1373 int intno;
1374 /* FIXME: this should respect TPR */
1375 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1376 intno = x86_ldl_phys(cs, env->vm_vmcb
1377 + offsetof(struct vmcb, control.int_vector));
1378 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1379 "Servicing virtual hardware INT=0x%02x\n", intno);
1380 do_interrupt_x86_hardirq(env, intno, 1);
1381 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1382 ret = true;
1383 #endif
1387 return ret;
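/* The if/else chain above encodes the acceptance priority: SIPI first,
   then, with GIF set, SMI, NMI, machine check, external interrupts and
   finally virtual interrupts injected through the VMCB. */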
1390 void helper_enter_level(CPUX86State *env, int level, int data32,
1391 target_ulong t1)
1393 target_ulong ssp;
1394 uint32_t esp_mask, esp, ebp;
1396 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1397 ssp = env->segs[R_SS].base;
1398 ebp = env->regs[R_EBP];
1399 esp = env->regs[R_ESP];
1400 if (data32) {
1401 /* 32 bit */
1402 esp -= 4;
1403 while (--level) {
1404 esp -= 4;
1405 ebp -= 4;
1406 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1407 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1408 GETPC()),
1409 GETPC());
1411 esp -= 4;
1412 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1413 } else {
1414 /* 16 bit */
1415 esp -= 2;
1416 while (--level) {
1417 esp -= 2;
1418 ebp -= 2;
1419 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1420 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1421 GETPC()),
1422 GETPC());
1424 esp -= 2;
1425 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1429 #ifdef TARGET_X86_64
1430 void helper_enter64_level(CPUX86State *env, int level, int data64,
1431 target_ulong t1)
1433 target_ulong esp, ebp;
1435 ebp = env->regs[R_EBP];
1436 esp = env->regs[R_ESP];
1438 if (data64) {
1439 /* 64 bit */
1440 esp -= 8;
1441 while (--level) {
1442 esp -= 8;
1443 ebp -= 8;
1444 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1445 GETPC());
1447 esp -= 8;
1448 cpu_stq_data_ra(env, esp, t1, GETPC());
1449 } else {
1450 /* 16 bit */
1451 esp -= 2;
1452 while (--level) {
1453 esp -= 2;
1454 ebp -= 2;
1455 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1456 GETPC());
1458 esp -= 2;
1459 cpu_stw_data_ra(env, esp, t1, GETPC());
1462 #endif
1464 void helper_lldt(CPUX86State *env, int selector)
1466 SegmentCache *dt;
1467 uint32_t e1, e2;
1468 int index, entry_limit;
1469 target_ulong ptr;
1471 selector &= 0xffff;
1472 if ((selector & 0xfffc) == 0) {
1473 /* XXX: NULL selector case: invalid LDT */
1474 env->ldt.base = 0;
1475 env->ldt.limit = 0;
1476 } else {
1477 if (selector & 0x4) {
1478 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1480 dt = &env->gdt;
1481 index = selector & ~7;
1482 #ifdef TARGET_X86_64
1483 if (env->hflags & HF_LMA_MASK) {
1484 entry_limit = 15;
1485 } else
1486 #endif
1488 entry_limit = 7;
1490 if ((index + entry_limit) > dt->limit) {
1491 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1493 ptr = dt->base + index;
1494 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1495 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1496 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1497 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1499 if (!(e2 & DESC_P_MASK)) {
1500 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1502 #ifdef TARGET_X86_64
1503 if (env->hflags & HF_LMA_MASK) {
1504 uint32_t e3;
1506 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1507 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1508 env->ldt.base |= (target_ulong)e3 << 32;
1509 } else
1510 #endif
1512 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1515 env->ldt.selector = selector;
1518 void helper_ltr(CPUX86State *env, int selector)
1520 SegmentCache *dt;
1521 uint32_t e1, e2;
1522 int index, type, entry_limit;
1523 target_ulong ptr;
1525 selector &= 0xffff;
1526 if ((selector & 0xfffc) == 0) {
1527 /* NULL selector case: invalid TR */
1528 env->tr.base = 0;
1529 env->tr.limit = 0;
1530 env->tr.flags = 0;
1531 } else {
1532 if (selector & 0x4) {
1533 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1535 dt = &env->gdt;
1536 index = selector & ~7;
1537 #ifdef TARGET_X86_64
1538 if (env->hflags & HF_LMA_MASK) {
1539 entry_limit = 15;
1540 } else
1541 #endif
1543 entry_limit = 7;
1545 if ((index + entry_limit) > dt->limit) {
1546 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1548 ptr = dt->base + index;
1549 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1550 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1551 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1552 if ((e2 & DESC_S_MASK) ||
1553 (type != 1 && type != 9)) {
1554 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1556 if (!(e2 & DESC_P_MASK)) {
1557 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1559 #ifdef TARGET_X86_64
1560 if (env->hflags & HF_LMA_MASK) {
1561 uint32_t e3, e4;
1563 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1564 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1565 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1566 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1568 load_seg_cache_raw_dt(&env->tr, e1, e2);
1569 env->tr.base |= (target_ulong)e3 << 32;
1570 } else
1571 #endif
1573 load_seg_cache_raw_dt(&env->tr, e1, e2);
1575 e2 |= DESC_TSS_BUSY_MASK;
1576 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1578 env->tr.selector = selector;
1581 /* only works in protected mode, outside VM86; seg_reg must be != R_CS */
1582 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1584 uint32_t e1, e2;
1585 int cpl, dpl, rpl;
1586 SegmentCache *dt;
1587 int index;
1588 target_ulong ptr;
1590 selector &= 0xffff;
1591 cpl = env->hflags & HF_CPL_MASK;
1592 if ((selector & 0xfffc) == 0) {
1593 /* null selector case */
1594 if (seg_reg == R_SS
1595 #ifdef TARGET_X86_64
1596 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1597 #endif
1599 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1601 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1602 } else {
1604 if (selector & 0x4) {
1605 dt = &env->ldt;
1606 } else {
1607 dt = &env->gdt;
1609 index = selector & ~7;
1610 if ((index + 7) > dt->limit) {
1611 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1613 ptr = dt->base + index;
1614 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1615 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1617 if (!(e2 & DESC_S_MASK)) {
1618 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1620 rpl = selector & 3;
1621 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1622 if (seg_reg == R_SS) {
1623 /* must be writable segment */
1624 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1625 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1627 if (rpl != cpl || dpl != cpl) {
1628 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1630 } else {
1631 /* must be readable segment */
1632 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1633 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1636 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1637 /* if not conforming code, test rights */
1638 if (dpl < cpl || dpl < rpl) {
1639 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1644 if (!(e2 & DESC_P_MASK)) {
1645 if (seg_reg == R_SS) {
1646 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1647 } else {
1648 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1652 /* set the access bit if not already set */
1653 if (!(e2 & DESC_A_MASK)) {
1654 e2 |= DESC_A_MASK;
1655 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1658 cpu_x86_load_seg_cache(env, seg_reg, selector,
1659 get_seg_base(e1, e2),
1660 get_seg_limit(e1, e2),
1661 e2);
1662 #if 0
1663 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1664 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1665 #endif
1669 /* protected mode jump */
1670 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1671 target_ulong next_eip)
1673 int gate_cs, type;
1674 uint32_t e1, e2, cpl, dpl, rpl, limit;
1676 if ((new_cs & 0xfffc) == 0) {
1677 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1679 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1680 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1682 cpl = env->hflags & HF_CPL_MASK;
1683 if (e2 & DESC_S_MASK) {
1684 if (!(e2 & DESC_CS_MASK)) {
1685 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1687 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1688 if (e2 & DESC_C_MASK) {
1689 /* conforming code segment */
1690 if (dpl > cpl) {
1691 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1693 } else {
1694 /* non conforming code segment */
1695 rpl = new_cs & 3;
1696 if (rpl > cpl) {
1697 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1699 if (dpl != cpl) {
1700 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1703 if (!(e2 & DESC_P_MASK)) {
1704 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1706 limit = get_seg_limit(e1, e2);
1707 if (new_eip > limit &&
1708 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1709 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1711 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1712 get_seg_base(e1, e2), limit, e2);
1713 env->eip = new_eip;
1714 } else {
1715 /* jump to call or task gate */
1716 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1717 rpl = new_cs & 3;
1718 cpl = env->hflags & HF_CPL_MASK;
1719 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1720 switch (type) {
1721 case 1: /* 286 TSS */
1722 case 9: /* 386 TSS */
1723 case 5: /* task gate */
1724 if (dpl < cpl || dpl < rpl) {
1725 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1727 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1728 break;
1729 case 4: /* 286 call gate */
1730 case 12: /* 386 call gate */
1731 if ((dpl < cpl) || (dpl < rpl)) {
1732 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1734 if (!(e2 & DESC_P_MASK)) {
1735 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1737 gate_cs = e1 >> 16;
1738 new_eip = (e1 & 0xffff);
1739 if (type == 12) {
1740 new_eip |= (e2 & 0xffff0000);
1742 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1743 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1745 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1746 /* must be code segment */
1747 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1748 (DESC_S_MASK | DESC_CS_MASK))) {
1749 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1751 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1752 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1753 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1755 if (!(e2 & DESC_P_MASK)) {
1756 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1758 limit = get_seg_limit(e1, e2);
1759 if (new_eip > limit) {
1760 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1762 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1763 get_seg_base(e1, e2), limit, e2);
1764 env->eip = new_eip;
1765 break;
1766 default:
1767 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1768 break;
1773 /* real mode call */
1774 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1775 int shift, int next_eip)
1777 int new_eip;
1778 uint32_t esp, esp_mask;
1779 target_ulong ssp;
1781 new_eip = new_eip1;
1782 esp = env->regs[R_ESP];
1783 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1784 ssp = env->segs[R_SS].base;
1785 if (shift) {
1786 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1787 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1788 } else {
1789 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1790 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1793 SET_ESP(esp, esp_mask);
1794 env->eip = new_eip;
1795 env->segs[R_CS].selector = new_cs;
1796 env->segs[R_CS].base = (new_cs << 4);
1799 /* protected mode call */
1800 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1801 int shift, target_ulong next_eip)
1803 int new_stack, i;
1804 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1805 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1806 uint32_t val, limit, old_sp_mask;
1807 target_ulong ssp, old_ssp;
1809 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1810 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1811 if ((new_cs & 0xfffc) == 0) {
1812 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1814 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1815 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1817 cpl = env->hflags & HF_CPL_MASK;
1818 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1819 if (e2 & DESC_S_MASK) {
1820 if (!(e2 & DESC_CS_MASK)) {
1821 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1823 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1824 if (e2 & DESC_C_MASK) {
1825 /* conforming code segment */
1826 if (dpl > cpl) {
1827 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1829 } else {
1830 /* non conforming code segment */
1831 rpl = new_cs & 3;
1832 if (rpl > cpl) {
1833 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1835 if (dpl != cpl) {
1836 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1839 if (!(e2 & DESC_P_MASK)) {
1840 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
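        /*
         * A call gate supplies the real target: the code segment selector and
         * entry offset come from the gate descriptor, and param_count gives
         * the number of stack words to copy if a stack switch is needed.
         */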
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

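        /*
         * Non-conforming target with DPL < CPL: switch to the inner stack
         * described by the TSS for the new privilege level and copy
         * param_count parameters from the caller's stack.
         */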
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

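/*
 * After a return to an outer privilege level, data segment registers that the
 * new (less privileged) CPL may not use are cleared; see the validate_seg()
 * calls in helper_ret_protected() below.
 */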
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

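    /*
     * "addend" is the immediate operand of a far "ret n" (it is 0 for iret):
     * it releases the callee's parameters from the current stack before any
     * outer SS:ESP is popped.
     */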
    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

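        /* release the "ret n" parameter bytes from the outer stack as well */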
        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

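    /*
     * IRET back to virtual-8086 mode: the rest of the 32-bit frame (ESP, SS,
     * ES, DS, FS, GS) is popped and every segment register is reloaded as a
     * real-mode style selector.
     */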
 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

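/*
 * SYSENTER enters CPL 0 with flat code and stack segments derived from the
 * SYSENTER_CS MSR (CS = SYSENTER_CS, SS = SYSENTER_CS + 8) and loads EIP/ESP
 * from the SYSENTER_EIP/SYSENTER_ESP MSRs.
 */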
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

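/*
 * SYSEXIT returns to CPL 3.  The selectors are fixed offsets from
 * SYSENTER_CS: +16 (CS) and +24 (SS) for a 32-bit return, +32 and +40 when
 * returning to 64-bit mode; ECX/EDX (RCX/RDX) supply the new ESP and EIP.
 */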
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

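/*
 * LSL, LAR, VERR and VERW never fault on a selector they cannot use; they
 * report success or failure through ZF, which is what the CC_SRC updates
 * below implement.
 */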
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

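/*
 * The I/O permission bitmap is part of the 32-bit TSS: its offset is read
 * from TSS+0x66 and each bit covers one port, so an access of "size" bytes
 * is allowed only when all "size" corresponding bits are clear.
 */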
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}