target-i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
39 #define DATA_SIZE 1
40 #include "exec/cpu_ldst_useronly_template.h"
42 #define DATA_SIZE 2
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 4
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 8
49 #include "exec/cpu_ldst_useronly_template.h"
50 #undef MEMSUFFIX
51 #else
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
54 #define DATA_SIZE 1
55 #include "exec/cpu_ldst_template.h"
57 #define DATA_SIZE 2
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 4
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 8
64 #include "exec/cpu_ldst_template.h"
65 #undef CPU_MMU_INDEX
66 #undef MEMSUFFIX
67 #endif
69 /* return non-zero on error */
70 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
71 uint32_t *e2_ptr, int selector,
72 uintptr_t retaddr)
74 SegmentCache *dt;
75 int index;
76 target_ulong ptr;
78 if (selector & 0x4) {
79 dt = &env->ldt;
80 } else {
81 dt = &env->gdt;
83 index = selector & ~7;
84 if ((index + 7) > dt->limit) {
85 return -1;
87 ptr = dt->base + index;
88 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
89 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
90 return 0;
93 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
94 uint32_t *e2_ptr, int selector)
96 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
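/* Legacy descriptor encoding, as decoded by the helpers below:
 *   e1 (low word):  limit[15:0]  in bits 15..0,  base[15:0]  in bits 31..16
 *   e2 (high word): base[23:16]  in bits 7..0,   type/S/DPL/P in bits 8..15,
 *                   limit[19:16] in bits 16..19, AVL/L/D-B/G  in bits 20..23,
 *                   base[31:24]  in bits 24..31
 */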
99 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
101 unsigned int limit;
103 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
104 if (e2 & DESC_G_MASK) {
105 limit = (limit << 12) | 0xfff;
107 return limit;
110 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
112 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
115 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
116 uint32_t e2)
118 sc->base = get_seg_base(e1, e2);
119 sc->limit = get_seg_limit(e1, e2);
120 sc->flags = e2;
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
126 selector &= 0xffff;
128 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
129 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
130 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
133 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
134 uint32_t *esp_ptr, int dpl,
135 uintptr_t retaddr)
137 X86CPU *cpu = x86_env_get_cpu(env);
138 int type, index, shift;
140 #if 0
142 int i;
143 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
144 for (i = 0; i < env->tr.limit; i++) {
145 printf("%02x ", env->tr.base[i]);
146 if ((i & 7) == 7) {
147 printf("\n");
150 printf("\n");
152 #endif
154 if (!(env->tr.flags & DESC_P_MASK)) {
155 cpu_abort(CPU(cpu), "invalid tss");
157 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
158 if ((type & 7) != 1) {
159 cpu_abort(CPU(cpu), "invalid tss type");
161 shift = type >> 3;
162 index = (dpl * 4 + 2) << shift;
163 if (index + (4 << shift) - 1 > env->tr.limit) {
164 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
166 if (shift == 0) {
167 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
168 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
169 } else {
170 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
175 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
176 uintptr_t retaddr)
178 uint32_t e1, e2;
179 int rpl, dpl;
181 if ((selector & 0xfffc) != 0) {
182 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
183 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
185 if (!(e2 & DESC_S_MASK)) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 rpl = selector & 3;
189 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
190 if (seg_reg == R_CS) {
191 if (!(e2 & DESC_CS_MASK)) {
192 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
194 if (dpl != rpl) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 } else if (seg_reg == R_SS) {
198 /* SS must be writable data */
199 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
200 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
202 if (dpl != cpl || dpl != rpl) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 } else {
206 /* not readable code */
207 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
208 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
210 /* if data or non-conforming code, check the rights */
211 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
212 if (dpl < cpl || dpl < rpl) {
213 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
217 if (!(e2 & DESC_P_MASK)) {
218 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
220 cpu_x86_load_seg_cache(env, seg_reg, selector,
221 get_seg_base(e1, e2),
222 get_seg_limit(e1, e2),
223 e2);
224 } else {
225 if (seg_reg == R_SS || seg_reg == R_CS) {
226 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231 #define SWITCH_TSS_JMP 0
232 #define SWITCH_TSS_IRET 1
233 #define SWITCH_TSS_CALL 2
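/* Offsets used by switch_tss_ra() follow the architectural TSS layouts:
 * 32-bit TSS: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, EAX..EDI at
 * 0x28..0x44, segment selectors from 0x48, LDT selector at 0x60 and the
 * T bit/I/O map base word at 0x64. 16-bit TSS: IP at 0x0e, FLAGS at 0x10,
 * the general registers from 0x12 and the LDT selector at 0x2a. */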
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State *env, int tss_selector,
237 uint32_t e1, uint32_t e2, int source,
238 uint32_t next_eip, uintptr_t retaddr)
240 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
241 target_ulong tss_base;
242 uint32_t new_regs[8], new_segs[6];
243 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
244 uint32_t old_eflags, eflags_mask;
245 SegmentCache *dt;
246 int index;
247 target_ulong ptr;
249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
251 source);
253 /* if task gate, we read the TSS segment and we load it */
254 if (type == 5) {
255 if (!(e2 & DESC_P_MASK)) {
256 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
258 tss_selector = e1 >> 16;
259 if (tss_selector & 4) {
260 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
262 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
263 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
265 if (e2 & DESC_S_MASK) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
269 if ((type & 7) != 1) {
270 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
274 if (!(e2 & DESC_P_MASK)) {
275 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
278 if (type & 8) {
279 tss_limit_max = 103;
280 } else {
281 tss_limit_max = 43;
283 tss_limit = get_seg_limit(e1, e2);
284 tss_base = get_seg_base(e1, e2);
285 if ((tss_selector & 4) != 0 ||
286 tss_limit < tss_limit_max) {
287 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
289 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
290 if (old_type & 8) {
291 old_tss_limit_max = 103;
292 } else {
293 old_tss_limit_max = 43;
296 /* read all the registers from the new TSS */
297 if (type & 8) {
298 /* 32 bit */
299 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
300 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
301 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
302 for (i = 0; i < 8; i++) {
303 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
304 retaddr);
306 for (i = 0; i < 6; i++) {
307 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
308 retaddr);
310 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
311 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
312 } else {
313 /* 16 bit */
314 new_cr3 = 0;
315 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
316 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
317 for (i = 0; i < 8; i++) {
318 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
319 retaddr) | 0xffff0000;
321 for (i = 0; i < 4; i++) {
322 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
323 retaddr);
325 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
326 new_segs[R_FS] = 0;
327 new_segs[R_GS] = 0;
328 new_trap = 0;
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
333 (void)new_trap;
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses before */
337 /* XXX: it can still fail in some cases, so a bigger hack is
338 necessary to validate the TLB after having done the accesses */
340 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
341 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
342 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
345 /* clear busy bit (it is restartable) */
346 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
347 target_ulong ptr;
348 uint32_t e2;
350 ptr = env->gdt.base + (env->tr.selector & ~7);
351 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
352 e2 &= ~DESC_TSS_BUSY_MASK;
353 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
355 old_eflags = cpu_compute_eflags(env);
356 if (source == SWITCH_TSS_IRET) {
357 old_eflags &= ~NT_MASK;
360 /* save the current state in the old TSS */
361 if (type & 8) {
362 /* 32 bit */
363 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
364 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
373 for (i = 0; i < 6; i++) {
374 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
375 env->segs[i].selector, retaddr);
377 } else {
378 /* 16 bit */
379 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
380 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
389 for (i = 0; i < 4; i++) {
390 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
391 env->segs[i].selector, retaddr);
395 /* now if an exception occurs, it will occur in the next task
396 context */
398 if (source == SWITCH_TSS_CALL) {
399 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
400 new_eflags |= NT_MASK;
403 /* set busy bit */
404 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
405 target_ulong ptr;
406 uint32_t e2;
408 ptr = env->gdt.base + (tss_selector & ~7);
409 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
410 e2 |= DESC_TSS_BUSY_MASK;
411 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env->cr[0] |= CR0_TS_MASK;
417 env->hflags |= HF_TS_MASK;
418 env->tr.selector = tss_selector;
419 env->tr.base = tss_base;
420 env->tr.limit = tss_limit;
421 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
423 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
424 cpu_x86_update_cr3(env, new_cr3);
427 /* load all registers without an exception, then reload them with
428 a possible exception */
429 env->eip = new_eip;
430 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
431 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
432 if (!(type & 8)) {
433 eflags_mask &= 0xffff;
435 cpu_load_eflags(env, new_eflags, eflags_mask);
436 /* XXX: what to do in 16 bit case? */
437 env->regs[R_EAX] = new_regs[0];
438 env->regs[R_ECX] = new_regs[1];
439 env->regs[R_EDX] = new_regs[2];
440 env->regs[R_EBX] = new_regs[3];
441 env->regs[R_ESP] = new_regs[4];
442 env->regs[R_EBP] = new_regs[5];
443 env->regs[R_ESI] = new_regs[6];
444 env->regs[R_EDI] = new_regs[7];
445 if (new_eflags & VM_MASK) {
446 for (i = 0; i < 6; i++) {
447 load_seg_vm(env, i, new_segs[i]);
449 } else {
450 /* first just selectors as the rest may trigger exceptions */
451 for (i = 0; i < 6; i++) {
452 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
456 env->ldt.selector = new_ldt & ~4;
457 env->ldt.base = 0;
458 env->ldt.limit = 0;
459 env->ldt.flags = 0;
461 /* load the LDT */
462 if (new_ldt & 4) {
463 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
466 if ((new_ldt & 0xfffc) != 0) {
467 dt = &env->gdt;
468 index = new_ldt & ~7;
469 if ((index + 7) > dt->limit) {
470 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
472 ptr = dt->base + index;
473 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
474 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
475 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
476 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
478 if (!(e2 & DESC_P_MASK)) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 int cpl = new_segs[R_CS] & 3;
487 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
488 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
489 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
490 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
491 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
492 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip > env->segs[R_CS].limit) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
504 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
506 #endif
509 static void switch_tss(CPUX86State *env, int tss_selector,
510 uint32_t e1, uint32_t e2, int source,
511 uint32_t next_eip)
513 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
516 static inline unsigned int get_sp_mask(unsigned int e2)
518 if (e2 & DESC_B_MASK) {
519 return 0xffffffff;
520 } else {
521 return 0xffff;
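/* Vectors that push an error code: 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS),
   13 (#GP), 14 (#PF) and 17 (#AC). */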
525 static int exception_has_error_code(int intno)
527 switch (intno) {
528 case 8:
529 case 10:
530 case 11:
531 case 12:
532 case 13:
533 case 14:
534 case 17:
535 return 1;
537 return 0;
540 #ifdef TARGET_X86_64
541 #define SET_ESP(val, sp_mask) \
542 do { \
543 if ((sp_mask) == 0xffff) { \
544 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
545 ((val) & 0xffff); \
546 } else if ((sp_mask) == 0xffffffffLL) { \
547 env->regs[R_ESP] = (uint32_t)(val); \
548 } else { \
549 env->regs[R_ESP] = (val); \
551 } while (0)
552 #else
553 #define SET_ESP(val, sp_mask) \
554 do { \
555 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
556 ((val) & (sp_mask)); \
557 } while (0)
558 #endif
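/* On x86-64 a 32-bit write to the stack pointer zero-extends into RSP,
 * which is why the 0xffffffff mask is special-cased above instead of
 * using the generic read-modify-write form. */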
560 /* in 64-bit machines, this can overflow. So this segment addition macro
561 * can be used to trim the value to 32-bit whenever needed */
562 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
564 /* XXX: add a is_user flag to have proper security support */
565 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
567 sp -= 2; \
568 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
571 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
573 sp -= 4; \
574 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
577 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
579 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
580 sp += 2; \
583 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
585 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
586 sp += 4; \
589 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
590 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
591 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
592 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
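/* Gate descriptors are decoded the same way in the interrupt paths below:
 * target selector in e1[31:16], offset[15:0] in e1[15:0], offset[31:16]
 * in e2[31:16]; 64-bit gates add offset[63:32] in a third word (e3). */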
594 /* protected mode interrupt */
595 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
596 int error_code, unsigned int next_eip,
597 int is_hw)
599 SegmentCache *dt;
600 target_ulong ptr, ssp;
601 int type, dpl, selector, ss_dpl, cpl;
602 int has_error_code, new_stack, shift;
603 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
604 uint32_t old_eip, sp_mask;
605 int vm86 = env->eflags & VM_MASK;
607 has_error_code = 0;
608 if (!is_int && !is_hw) {
609 has_error_code = exception_has_error_code(intno);
611 if (is_int) {
612 old_eip = next_eip;
613 } else {
614 old_eip = env->eip;
617 dt = &env->idt;
618 if (intno * 8 + 7 > dt->limit) {
619 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
621 ptr = dt->base + intno * 8;
622 e1 = cpu_ldl_kernel(env, ptr);
623 e2 = cpu_ldl_kernel(env, ptr + 4);
624 /* check gate type */
625 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
626 switch (type) {
627 case 5: /* task gate */
628 /* must do that check here to return the correct error code */
629 if (!(e2 & DESC_P_MASK)) {
630 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
632 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
633 if (has_error_code) {
634 int type;
635 uint32_t mask;
637 /* push the error code */
638 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
639 shift = type >> 3;
640 if (env->segs[R_SS].flags & DESC_B_MASK) {
641 mask = 0xffffffff;
642 } else {
643 mask = 0xffff;
645 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
646 ssp = env->segs[R_SS].base + esp;
647 if (shift) {
648 cpu_stl_kernel(env, ssp, error_code);
649 } else {
650 cpu_stw_kernel(env, ssp, error_code);
652 SET_ESP(esp, mask);
654 return;
655 case 6: /* 286 interrupt gate */
656 case 7: /* 286 trap gate */
657 case 14: /* 386 interrupt gate */
658 case 15: /* 386 trap gate */
659 break;
660 default:
661 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
662 break;
664 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
665 cpl = env->hflags & HF_CPL_MASK;
666 /* check privilege if software int */
667 if (is_int && dpl < cpl) {
668 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
670 /* check valid bit */
671 if (!(e2 & DESC_P_MASK)) {
672 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
674 selector = e1 >> 16;
675 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
676 if ((selector & 0xfffc) == 0) {
677 raise_exception_err(env, EXCP0D_GPF, 0);
679 if (load_segment(env, &e1, &e2, selector) != 0) {
680 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
682 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
683 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
685 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
686 if (dpl > cpl) {
687 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
689 if (!(e2 & DESC_P_MASK)) {
690 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
692 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
693 /* to inner privilege */
694 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
695 if ((ss & 0xfffc) == 0) {
696 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
698 if ((ss & 3) != dpl) {
699 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
701 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
705 if (ss_dpl != dpl) {
706 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
708 if (!(ss_e2 & DESC_S_MASK) ||
709 (ss_e2 & DESC_CS_MASK) ||
710 !(ss_e2 & DESC_W_MASK)) {
711 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
713 if (!(ss_e2 & DESC_P_MASK)) {
714 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
716 new_stack = 1;
717 sp_mask = get_sp_mask(ss_e2);
718 ssp = get_seg_base(ss_e1, ss_e2);
719 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
720 /* to same privilege */
721 if (vm86) {
722 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
724 new_stack = 0;
725 sp_mask = get_sp_mask(env->segs[R_SS].flags);
726 ssp = env->segs[R_SS].base;
727 esp = env->regs[R_ESP];
728 dpl = cpl;
729 } else {
730 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
731 new_stack = 0; /* avoid warning */
732 sp_mask = 0; /* avoid warning */
733 ssp = 0; /* avoid warning */
734 esp = 0; /* avoid warning */
737 shift = type >> 3;
739 #if 0
740 /* XXX: check that enough room is available */
741 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
742 if (vm86) {
743 push_size += 8;
745 push_size <<= shift;
746 #endif
747 if (shift == 1) {
748 if (new_stack) {
749 if (vm86) {
750 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
751 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
752 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
753 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
756 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
758 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
759 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
760 PUSHL(ssp, esp, sp_mask, old_eip);
761 if (has_error_code) {
762 PUSHL(ssp, esp, sp_mask, error_code);
764 } else {
765 if (new_stack) {
766 if (vm86) {
767 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
768 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
769 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
770 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
773 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
775 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
776 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
777 PUSHW(ssp, esp, sp_mask, old_eip);
778 if (has_error_code) {
779 PUSHW(ssp, esp, sp_mask, error_code);
783 /* an interrupt gate clears the IF mask */
784 if ((type & 1) == 0) {
785 env->eflags &= ~IF_MASK;
787 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
789 if (new_stack) {
790 if (vm86) {
791 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
792 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
793 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
796 ss = (ss & ~3) | dpl;
797 cpu_x86_load_seg_cache(env, R_SS, ss,
798 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
800 SET_ESP(esp, sp_mask);
802 selector = (selector & ~3) | dpl;
803 cpu_x86_load_seg_cache(env, R_CS, selector,
804 get_seg_base(e1, e2),
805 get_seg_limit(e1, e2),
806 e2);
807 env->eip = offset;
810 #ifdef TARGET_X86_64
812 #define PUSHQ_RA(sp, val, ra) \
814 sp -= 8; \
815 cpu_stq_kernel_ra(env, sp, (val), ra); \
818 #define POPQ_RA(sp, val, ra) \
820 val = cpu_ldq_kernel_ra(env, sp, ra); \
821 sp += 8; \
824 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
825 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
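/* In the 64-bit TSS, RSP0..RSP2 are stored at offsets 4, 12 and 20 and
 * IST1..IST7 at offsets 36..84, so "8 * level + 4" below addresses RSPn
 * for level 0..2 and ISTn for level n + 3. */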
827 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
829 X86CPU *cpu = x86_env_get_cpu(env);
830 int index;
832 #if 0
833 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
834 env->tr.base, env->tr.limit);
835 #endif
837 if (!(env->tr.flags & DESC_P_MASK)) {
838 cpu_abort(CPU(cpu), "invalid tss");
840 index = 8 * level + 4;
841 if ((index + 7) > env->tr.limit) {
842 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
844 return cpu_ldq_kernel(env, env->tr.base + index);
847 /* 64 bit interrupt */
848 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
849 int error_code, target_ulong next_eip, int is_hw)
851 SegmentCache *dt;
852 target_ulong ptr;
853 int type, dpl, selector, cpl, ist;
854 int has_error_code, new_stack;
855 uint32_t e1, e2, e3, ss;
856 target_ulong old_eip, esp, offset;
858 has_error_code = 0;
859 if (!is_int && !is_hw) {
860 has_error_code = exception_has_error_code(intno);
862 if (is_int) {
863 old_eip = next_eip;
864 } else {
865 old_eip = env->eip;
868 dt = &env->idt;
869 if (intno * 16 + 15 > dt->limit) {
870 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
872 ptr = dt->base + intno * 16;
873 e1 = cpu_ldl_kernel(env, ptr);
874 e2 = cpu_ldl_kernel(env, ptr + 4);
875 e3 = cpu_ldl_kernel(env, ptr + 8);
876 /* check gate type */
877 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
878 switch (type) {
879 case 14: /* 386 interrupt gate */
880 case 15: /* 386 trap gate */
881 break;
882 default:
883 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
884 break;
886 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
887 cpl = env->hflags & HF_CPL_MASK;
888 /* check privilege if software int */
889 if (is_int && dpl < cpl) {
890 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
892 /* check valid bit */
893 if (!(e2 & DESC_P_MASK)) {
894 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
896 selector = e1 >> 16;
897 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
898 ist = e2 & 7;
899 if ((selector & 0xfffc) == 0) {
900 raise_exception_err(env, EXCP0D_GPF, 0);
903 if (load_segment(env, &e1, &e2, selector) != 0) {
904 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
906 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
907 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
909 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
910 if (dpl > cpl) {
911 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
913 if (!(e2 & DESC_P_MASK)) {
914 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
916 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
917 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
919 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
920 /* to inner privilege */
921 new_stack = 1;
922 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
923 ss = 0;
924 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
925 /* to same privilege */
926 if (env->eflags & VM_MASK) {
927 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
929 new_stack = 0;
930 esp = env->regs[R_ESP];
931 dpl = cpl;
932 } else {
933 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
934 new_stack = 0; /* avoid warning */
935 esp = 0; /* avoid warning */
937 esp &= ~0xfLL; /* align stack */
939 PUSHQ(esp, env->segs[R_SS].selector);
940 PUSHQ(esp, env->regs[R_ESP]);
941 PUSHQ(esp, cpu_compute_eflags(env));
942 PUSHQ(esp, env->segs[R_CS].selector);
943 PUSHQ(esp, old_eip);
944 if (has_error_code) {
945 PUSHQ(esp, error_code);
948 /* an interrupt gate clears the IF mask */
949 if ((type & 1) == 0) {
950 env->eflags &= ~IF_MASK;
952 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
954 if (new_stack) {
955 ss = 0 | dpl;
956 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
958 env->regs[R_ESP] = esp;
960 selector = (selector & ~3) | dpl;
961 cpu_x86_load_seg_cache(env, R_CS, selector,
962 get_seg_base(e1, e2),
963 get_seg_limit(e1, e2),
964 e2);
965 env->eip = offset;
967 #endif
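/* The softmmu variants of helper_syscall()/helper_sysret() below derive
 * the flat code and stack segments from MSR_STAR: bits 47:32 hold the
 * SYSCALL CS selector (with SS = CS + 8) and bits 63:48 the SYSRET CS
 * selector base. */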
969 #ifdef TARGET_X86_64
970 #if defined(CONFIG_USER_ONLY)
971 void helper_syscall(CPUX86State *env, int next_eip_addend)
973 CPUState *cs = CPU(x86_env_get_cpu(env));
975 cs->exception_index = EXCP_SYSCALL;
976 env->exception_next_eip = env->eip + next_eip_addend;
977 cpu_loop_exit(cs);
979 #else
980 void helper_syscall(CPUX86State *env, int next_eip_addend)
982 int selector;
984 if (!(env->efer & MSR_EFER_SCE)) {
985 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
987 selector = (env->star >> 32) & 0xffff;
988 if (env->hflags & HF_LMA_MASK) {
989 int code64;
991 env->regs[R_ECX] = env->eip + next_eip_addend;
992 env->regs[11] = cpu_compute_eflags(env);
994 code64 = env->hflags & HF_CS64_MASK;
996 env->eflags &= ~env->fmask;
997 cpu_load_eflags(env, env->eflags, 0);
998 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
999 0, 0xffffffff,
1000 DESC_G_MASK | DESC_P_MASK |
1001 DESC_S_MASK |
1002 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1003 DESC_L_MASK);
1004 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1005 0, 0xffffffff,
1006 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1007 DESC_S_MASK |
1008 DESC_W_MASK | DESC_A_MASK);
1009 if (code64) {
1010 env->eip = env->lstar;
1011 } else {
1012 env->eip = env->cstar;
1014 } else {
1015 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1017 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1018 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1019 0, 0xffffffff,
1020 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1021 DESC_S_MASK |
1022 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1023 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1024 0, 0xffffffff,
1025 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1026 DESC_S_MASK |
1027 DESC_W_MASK | DESC_A_MASK);
1028 env->eip = (uint32_t)env->star;
1031 #endif
1032 #endif
1034 #ifdef TARGET_X86_64
1035 void helper_sysret(CPUX86State *env, int dflag)
1037 int cpl, selector;
1039 if (!(env->efer & MSR_EFER_SCE)) {
1040 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1042 cpl = env->hflags & HF_CPL_MASK;
1043 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1044 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1046 selector = (env->star >> 48) & 0xffff;
1047 if (env->hflags & HF_LMA_MASK) {
1048 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1049 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1050 NT_MASK);
1051 if (dflag == 2) {
1052 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1053 0, 0xffffffff,
1054 DESC_G_MASK | DESC_P_MASK |
1055 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1056 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1057 DESC_L_MASK);
1058 env->eip = env->regs[R_ECX];
1059 } else {
1060 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1061 0, 0xffffffff,
1062 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1063 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1064 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1065 env->eip = (uint32_t)env->regs[R_ECX];
1067 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1068 0, 0xffffffff,
1069 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1070 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1071 DESC_W_MASK | DESC_A_MASK);
1072 } else {
1073 env->eflags |= IF_MASK;
1074 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1075 0, 0xffffffff,
1076 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1077 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1079 env->eip = (uint32_t)env->regs[R_ECX];
1080 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1081 0, 0xffffffff,
1082 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1083 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1084 DESC_W_MASK | DESC_A_MASK);
1087 #endif
1089 /* real mode interrupt */
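/* In real mode the IDT is the IVT: 4 bytes per vector, a 16-bit offset
 * followed by a 16-bit segment selector, which is what the two
 * cpu_lduw_kernel() reads below fetch. */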
1090 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1091 int error_code, unsigned int next_eip)
1093 SegmentCache *dt;
1094 target_ulong ptr, ssp;
1095 int selector;
1096 uint32_t offset, esp;
1097 uint32_t old_cs, old_eip;
1099 /* real mode (simpler!) */
1100 dt = &env->idt;
1101 if (intno * 4 + 3 > dt->limit) {
1102 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1104 ptr = dt->base + intno * 4;
1105 offset = cpu_lduw_kernel(env, ptr);
1106 selector = cpu_lduw_kernel(env, ptr + 2);
1107 esp = env->regs[R_ESP];
1108 ssp = env->segs[R_SS].base;
1109 if (is_int) {
1110 old_eip = next_eip;
1111 } else {
1112 old_eip = env->eip;
1114 old_cs = env->segs[R_CS].selector;
1115 /* XXX: use SS segment size? */
1116 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1117 PUSHW(ssp, esp, 0xffff, old_cs);
1118 PUSHW(ssp, esp, 0xffff, old_eip);
1120 /* update processor state */
1121 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1122 env->eip = offset;
1123 env->segs[R_CS].selector = selector;
1124 env->segs[R_CS].base = (selector << 4);
1125 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1128 #if defined(CONFIG_USER_ONLY)
1129 /* fake user mode interrupt */
1130 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1131 int error_code, target_ulong next_eip)
1133 SegmentCache *dt;
1134 target_ulong ptr;
1135 int dpl, cpl, shift;
1136 uint32_t e2;
1138 dt = &env->idt;
1139 if (env->hflags & HF_LMA_MASK) {
1140 shift = 4;
1141 } else {
1142 shift = 3;
1144 ptr = dt->base + (intno << shift);
1145 e2 = cpu_ldl_kernel(env, ptr + 4);
1147 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1148 cpl = env->hflags & HF_CPL_MASK;
1149 /* check privilege if software int */
1150 if (is_int && dpl < cpl) {
1151 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1154 /* Since we emulate only user space, we cannot do more than
1155 exit the emulation with a suitable exception and error
1156 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1157 if (is_int || intno == EXCP_SYSCALL) {
1158 env->eip = next_eip;
1162 #else
1164 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1165 int error_code, int is_hw, int rm)
1167 CPUState *cs = CPU(x86_env_get_cpu(env));
1168 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1169 control.event_inj));
1171 if (!(event_inj & SVM_EVTINJ_VALID)) {
1172 int type;
1174 if (is_int) {
1175 type = SVM_EVTINJ_TYPE_SOFT;
1176 } else {
1177 type = SVM_EVTINJ_TYPE_EXEPT;
1179 event_inj = intno | type | SVM_EVTINJ_VALID;
1180 if (!rm && exception_has_error_code(intno)) {
1181 event_inj |= SVM_EVTINJ_VALID_ERR;
1182 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1183 control.event_inj_err),
1184 error_code);
1186 x86_stl_phys(cs,
1187 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1188 event_inj);
1191 #endif
1194 * Begin execution of an interrupt. is_int is TRUE if coming from
1195 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1196 * instruction. It is only relevant if is_int is TRUE.
1198 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1199 int error_code, target_ulong next_eip, int is_hw)
1201 CPUX86State *env = &cpu->env;
1203 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1204 if ((env->cr[0] & CR0_PE_MASK)) {
1205 static int count;
1207 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1208 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1209 count, intno, error_code, is_int,
1210 env->hflags & HF_CPL_MASK,
1211 env->segs[R_CS].selector, env->eip,
1212 (int)env->segs[R_CS].base + env->eip,
1213 env->segs[R_SS].selector, env->regs[R_ESP]);
1214 if (intno == 0x0e) {
1215 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1216 } else {
1217 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1219 qemu_log("\n");
1220 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1221 #if 0
1223 int i;
1224 target_ulong ptr;
1226 qemu_log(" code=");
1227 ptr = env->segs[R_CS].base + env->eip;
1228 for (i = 0; i < 16; i++) {
1229 qemu_log(" %02x", ldub(ptr + i));
1231 qemu_log("\n");
1233 #endif
1234 count++;
1237 if (env->cr[0] & CR0_PE_MASK) {
1238 #if !defined(CONFIG_USER_ONLY)
1239 if (env->hflags & HF_SVMI_MASK) {
1240 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1242 #endif
1243 #ifdef TARGET_X86_64
1244 if (env->hflags & HF_LMA_MASK) {
1245 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1246 } else
1247 #endif
1249 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1250 is_hw);
1252 } else {
1253 #if !defined(CONFIG_USER_ONLY)
1254 if (env->hflags & HF_SVMI_MASK) {
1255 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1257 #endif
1258 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1261 #if !defined(CONFIG_USER_ONLY)
1262 if (env->hflags & HF_SVMI_MASK) {
1263 CPUState *cs = CPU(cpu);
1264 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1265 offsetof(struct vmcb,
1266 control.event_inj));
1268 x86_stl_phys(cs,
1269 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1270 event_inj & ~SVM_EVTINJ_VALID);
1272 #endif
1275 void x86_cpu_do_interrupt(CPUState *cs)
1277 X86CPU *cpu = X86_CPU(cs);
1278 CPUX86State *env = &cpu->env;
1280 #if defined(CONFIG_USER_ONLY)
1281 /* if user mode only, we simulate a fake exception
1282 which will be handled outside the cpu execution
1283 loop */
1284 do_interrupt_user(env, cs->exception_index,
1285 env->exception_is_int,
1286 env->error_code,
1287 env->exception_next_eip);
1288 /* successfully delivered */
1289 env->old_exception = -1;
1290 #else
1291 /* simulate a real cpu exception. On i386, it can
1292 trigger new exceptions, but we do not handle
1293 double or triple faults yet. */
1294 do_interrupt_all(cpu, cs->exception_index,
1295 env->exception_is_int,
1296 env->error_code,
1297 env->exception_next_eip, 0);
1298 /* successfully delivered */
1299 env->old_exception = -1;
1300 #endif
1303 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1305 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1308 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1310 X86CPU *cpu = X86_CPU(cs);
1311 CPUX86State *env = &cpu->env;
1312 bool ret = false;
1314 #if !defined(CONFIG_USER_ONLY)
1315 if (interrupt_request & CPU_INTERRUPT_POLL) {
1316 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1317 apic_poll_irq(cpu->apic_state);
1318 /* Don't process multiple interrupt requests in a single call.
1319 This is required to make icount-driven execution deterministic. */
1320 return true;
1322 #endif
1323 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1324 do_cpu_sipi(cpu);
1325 } else if (env->hflags2 & HF2_GIF_MASK) {
1326 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1327 !(env->hflags & HF_SMM_MASK)) {
1328 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1329 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1330 do_smm_enter(cpu);
1331 ret = true;
1332 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1333 !(env->hflags2 & HF2_NMI_MASK)) {
1334 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1335 env->hflags2 |= HF2_NMI_MASK;
1336 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1337 ret = true;
1338 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1339 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1340 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1341 ret = true;
1342 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1343 (((env->hflags2 & HF2_VINTR_MASK) &&
1344 (env->hflags2 & HF2_HIF_MASK)) ||
1345 (!(env->hflags2 & HF2_VINTR_MASK) &&
1346 (env->eflags & IF_MASK &&
1347 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1348 int intno;
1349 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1350 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1351 CPU_INTERRUPT_VIRQ);
1352 intno = cpu_get_pic_interrupt(env);
1353 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1354 "Servicing hardware INT=0x%02x\n", intno);
1355 do_interrupt_x86_hardirq(env, intno, 1);
1356 /* ensure that no TB jump will be modified as
1357 the program flow was changed */
1358 ret = true;
1359 #if !defined(CONFIG_USER_ONLY)
1360 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1361 (env->eflags & IF_MASK) &&
1362 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1363 int intno;
1364 /* FIXME: this should respect TPR */
1365 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1366 intno = x86_ldl_phys(cs, env->vm_vmcb
1367 + offsetof(struct vmcb, control.int_vector));
1368 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1369 "Servicing virtual hardware INT=0x%02x\n", intno);
1370 do_interrupt_x86_hardirq(env, intno, 1);
1371 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1372 ret = true;
1373 #endif
1377 return ret;
1380 void helper_enter_level(CPUX86State *env, int level, int data32,
1381 target_ulong t1)
1383 target_ulong ssp;
1384 uint32_t esp_mask, esp, ebp;
1386 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1387 ssp = env->segs[R_SS].base;
1388 ebp = env->regs[R_EBP];
1389 esp = env->regs[R_ESP];
1390 if (data32) {
1391 /* 32 bit */
1392 esp -= 4;
1393 while (--level) {
1394 esp -= 4;
1395 ebp -= 4;
1396 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1397 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1398 GETPC()),
1399 GETPC());
1401 esp -= 4;
1402 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1403 } else {
1404 /* 16 bit */
1405 esp -= 2;
1406 while (--level) {
1407 esp -= 2;
1408 ebp -= 2;
1409 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1410 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1411 GETPC()),
1412 GETPC());
1414 esp -= 2;
1415 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1419 #ifdef TARGET_X86_64
1420 void helper_enter64_level(CPUX86State *env, int level, int data64,
1421 target_ulong t1)
1423 target_ulong esp, ebp;
1425 ebp = env->regs[R_EBP];
1426 esp = env->regs[R_ESP];
1428 if (data64) {
1429 /* 64 bit */
1430 esp -= 8;
1431 while (--level) {
1432 esp -= 8;
1433 ebp -= 8;
1434 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1435 GETPC());
1437 esp -= 8;
1438 cpu_stq_data_ra(env, esp, t1, GETPC());
1439 } else {
1440 /* 16 bit */
1441 esp -= 2;
1442 while (--level) {
1443 esp -= 2;
1444 ebp -= 2;
1445 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1446 GETPC());
1448 esp -= 2;
1449 cpu_stw_data_ra(env, esp, t1, GETPC());
1452 #endif
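/* helper_lldt() and helper_ltr() accept 8-byte descriptors in legacy mode
 * and 16-byte system descriptors in long mode, hence the entry_limit of
 * 7 vs. 15 and the extra descriptor word (e3) holding base[63:32]. */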
1454 void helper_lldt(CPUX86State *env, int selector)
1456 SegmentCache *dt;
1457 uint32_t e1, e2;
1458 int index, entry_limit;
1459 target_ulong ptr;
1461 selector &= 0xffff;
1462 if ((selector & 0xfffc) == 0) {
1463 /* XXX: NULL selector case: invalid LDT */
1464 env->ldt.base = 0;
1465 env->ldt.limit = 0;
1466 } else {
1467 if (selector & 0x4) {
1468 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1470 dt = &env->gdt;
1471 index = selector & ~7;
1472 #ifdef TARGET_X86_64
1473 if (env->hflags & HF_LMA_MASK) {
1474 entry_limit = 15;
1475 } else
1476 #endif
1478 entry_limit = 7;
1480 if ((index + entry_limit) > dt->limit) {
1481 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1483 ptr = dt->base + index;
1484 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1485 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1486 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1487 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1489 if (!(e2 & DESC_P_MASK)) {
1490 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1492 #ifdef TARGET_X86_64
1493 if (env->hflags & HF_LMA_MASK) {
1494 uint32_t e3;
1496 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1497 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1498 env->ldt.base |= (target_ulong)e3 << 32;
1499 } else
1500 #endif
1502 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1505 env->ldt.selector = selector;
1508 void helper_ltr(CPUX86State *env, int selector)
1510 SegmentCache *dt;
1511 uint32_t e1, e2;
1512 int index, type, entry_limit;
1513 target_ulong ptr;
1515 selector &= 0xffff;
1516 if ((selector & 0xfffc) == 0) {
1517 /* NULL selector case: invalid TR */
1518 env->tr.base = 0;
1519 env->tr.limit = 0;
1520 env->tr.flags = 0;
1521 } else {
1522 if (selector & 0x4) {
1523 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1525 dt = &env->gdt;
1526 index = selector & ~7;
1527 #ifdef TARGET_X86_64
1528 if (env->hflags & HF_LMA_MASK) {
1529 entry_limit = 15;
1530 } else
1531 #endif
1533 entry_limit = 7;
1535 if ((index + entry_limit) > dt->limit) {
1536 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1538 ptr = dt->base + index;
1539 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1540 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1541 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1542 if ((e2 & DESC_S_MASK) ||
1543 (type != 1 && type != 9)) {
1544 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1546 if (!(e2 & DESC_P_MASK)) {
1547 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1549 #ifdef TARGET_X86_64
1550 if (env->hflags & HF_LMA_MASK) {
1551 uint32_t e3, e4;
1553 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1554 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1555 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1556 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1558 load_seg_cache_raw_dt(&env->tr, e1, e2);
1559 env->tr.base |= (target_ulong)e3 << 32;
1560 } else
1561 #endif
1563 load_seg_cache_raw_dt(&env->tr, e1, e2);
1565 e2 |= DESC_TSS_BUSY_MASK;
1566 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1568 env->tr.selector = selector;
1571 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1572 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1574 uint32_t e1, e2;
1575 int cpl, dpl, rpl;
1576 SegmentCache *dt;
1577 int index;
1578 target_ulong ptr;
1580 selector &= 0xffff;
1581 cpl = env->hflags & HF_CPL_MASK;
1582 if ((selector & 0xfffc) == 0) {
1583 /* null selector case */
1584 if (seg_reg == R_SS
1585 #ifdef TARGET_X86_64
1586 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1587 #endif
1589 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1591 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1592 } else {
1594 if (selector & 0x4) {
1595 dt = &env->ldt;
1596 } else {
1597 dt = &env->gdt;
1599 index = selector & ~7;
1600 if ((index + 7) > dt->limit) {
1601 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1603 ptr = dt->base + index;
1604 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1605 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1607 if (!(e2 & DESC_S_MASK)) {
1608 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1610 rpl = selector & 3;
1611 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1612 if (seg_reg == R_SS) {
1613 /* must be writable segment */
1614 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1615 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1617 if (rpl != cpl || dpl != cpl) {
1618 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1620 } else {
1621 /* must be readable segment */
1622 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1623 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1626 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1627 /* if not conforming code, test rights */
1628 if (dpl < cpl || dpl < rpl) {
1629 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1634 if (!(e2 & DESC_P_MASK)) {
1635 if (seg_reg == R_SS) {
1636 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1637 } else {
1638 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1642 /* set the access bit if not already set */
1643 if (!(e2 & DESC_A_MASK)) {
1644 e2 |= DESC_A_MASK;
1645 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1648 cpu_x86_load_seg_cache(env, seg_reg, selector,
1649 get_seg_base(e1, e2),
1650 get_seg_limit(e1, e2),
1651 e2);
1652 #if 0
1653 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1654 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1655 #endif
1659 /* protected mode jump */
1660 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1661 target_ulong next_eip)
1663 int gate_cs, type;
1664 uint32_t e1, e2, cpl, dpl, rpl, limit;
1666 if ((new_cs & 0xfffc) == 0) {
1667 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1669 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1670 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1672 cpl = env->hflags & HF_CPL_MASK;
1673 if (e2 & DESC_S_MASK) {
1674 if (!(e2 & DESC_CS_MASK)) {
1675 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1677 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1678 if (e2 & DESC_C_MASK) {
1679 /* conforming code segment */
1680 if (dpl > cpl) {
1681 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1683 } else {
1684 /* non conforming code segment */
1685 rpl = new_cs & 3;
1686 if (rpl > cpl) {
1687 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1689 if (dpl != cpl) {
1690 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1693 if (!(e2 & DESC_P_MASK)) {
1694 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1696 limit = get_seg_limit(e1, e2);
1697 if (new_eip > limit &&
1698 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1699 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1701 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1702 get_seg_base(e1, e2), limit, e2);
1703 env->eip = new_eip;
1704 } else {
1705 /* jump to call or task gate */
1706 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1707 rpl = new_cs & 3;
1708 cpl = env->hflags & HF_CPL_MASK;
1709 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1710 switch (type) {
1711 case 1: /* 286 TSS */
1712 case 9: /* 386 TSS */
1713 case 5: /* task gate */
1714 if (dpl < cpl || dpl < rpl) {
1715 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1717 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1718 break;
1719 case 4: /* 286 call gate */
1720 case 12: /* 386 call gate */
1721 if ((dpl < cpl) || (dpl < rpl)) {
1722 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1724 if (!(e2 & DESC_P_MASK)) {
1725 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1727 gate_cs = e1 >> 16;
1728 new_eip = (e1 & 0xffff);
1729 if (type == 12) {
1730 new_eip |= (e2 & 0xffff0000);
1732 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1733 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1735 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1736 /* must be code segment */
1737 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1738 (DESC_S_MASK | DESC_CS_MASK))) {
1739 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1741 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1742 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1743 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1745 if (!(e2 & DESC_P_MASK)) {
1746 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1748 limit = get_seg_limit(e1, e2);
1749 if (new_eip > limit) {
1750 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1752 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1753 get_seg_base(e1, e2), limit, e2);
1754 env->eip = new_eip;
1755 break;
1756 default:
1757 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1758 break;
1763 /* real mode call */
1764 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1765 int shift, int next_eip)
1767 int new_eip;
1768 uint32_t esp, esp_mask;
1769 target_ulong ssp;
1771 new_eip = new_eip1;
1772 esp = env->regs[R_ESP];
1773 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1774 ssp = env->segs[R_SS].base;
1775 if (shift) {
1776 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1777 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1778 } else {
1779 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1780 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1783 SET_ESP(esp, esp_mask);
1784 env->eip = new_eip;
1785 env->segs[R_CS].selector = new_cs;
1786 env->segs[R_CS].base = (new_cs << 4);
1789 /* protected mode call */
1790 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1791 int shift, target_ulong next_eip)
1793 int new_stack, i;
1794 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1795 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1796 uint32_t val, limit, old_sp_mask;
1797 target_ulong ssp, old_ssp;
1799 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1800 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1801 if ((new_cs & 0xfffc) == 0) {
1802 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1804 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1805 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1807 cpl = env->hflags & HF_CPL_MASK;
1808 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1809 if (e2 & DESC_S_MASK) {
1810 if (!(e2 & DESC_CS_MASK)) {
1811 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1813 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1814 if (e2 & DESC_C_MASK) {
1815 /* conforming code segment */
1816 if (dpl > cpl) {
1817 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1819 } else {
1820 /* non conforming code segment */
1821 rpl = new_cs & 3;
1822 if (rpl > cpl) {
1823 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1825 if (dpl != cpl) {
1826 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1829 if (!(e2 & DESC_P_MASK)) {
1830 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1833 #ifdef TARGET_X86_64
1834 /* XXX: check 16/32 bit cases in long mode */
1835 if (shift == 2) {
1836 target_ulong rsp;
1838 /* 64 bit case */
1839 rsp = env->regs[R_ESP];
1840 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1841 PUSHQ_RA(rsp, next_eip, GETPC());
1842 /* from this point, not restartable */
1843 env->regs[R_ESP] = rsp;
1844 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1845 get_seg_base(e1, e2),
1846 get_seg_limit(e1, e2), e2);
1847 env->eip = new_eip;
1848 } else
1849 #endif
1851 sp = env->regs[R_ESP];
1852 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1853 ssp = env->segs[R_SS].base;
1854 if (shift) {
1855 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1856 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1857 } else {
1858 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1859 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1862 limit = get_seg_limit(e1, e2);
1863 if (new_eip > limit) {
1864 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1866 /* from this point, not restartable */
1867 SET_ESP(sp, sp_mask);
1868 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1869 get_seg_base(e1, e2), limit, e2);
1870 env->eip = new_eip;
1872 } else {
1873 /* check gate type */
1874 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1875 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1876 rpl = new_cs & 3;
1877 switch (type) {
1878 case 1: /* available 286 TSS */
1879 case 9: /* available 386 TSS */
1880 case 5: /* task gate */
1881 if (dpl < cpl || dpl < rpl) {
1882 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1884 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1885 return;
1886 case 4: /* 286 call gate */
1887 case 12: /* 386 call gate */
1888 break;
1889 default:
1890 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1891 break;
1893 shift = type >> 3;
1895 if (dpl < cpl || dpl < rpl) {
1896 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1898 /* check valid bit */
1899 if (!(e2 & DESC_P_MASK)) {
1900 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1902 selector = e1 >> 16;
1903 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1904 param_count = e2 & 0x1f;
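/* A 386 call gate keeps the target CS selector in the high word of the first
   descriptor dword, splits the 32-bit entry offset across both dwords and
   stores the parameter count in the low 5 bits of the second dword, as
   decoded just above. */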
1905 if ((selector & 0xfffc) == 0) {
1906 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1907 }
1909 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1910 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1911 }
1912 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1913 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1914 }
1915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1916 if (dpl > cpl) {
1917 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1918 }
1919 if (!(e2 & DESC_P_MASK)) {
1920 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1921 }
1923 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1924 /* to inner privilege */
1925 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1926 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1927 TARGET_FMT_lx "\n", ss, sp, param_count,
1928 env->regs[R_ESP]);
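/* Inner-privilege transition: the new SS:ESP for the gate's target DPL was
   just fetched from the TSS; once SS is validated, param_count stack slots
   are copied from the caller's old stack onto the new one. */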
1929 if ((ss & 0xfffc) == 0) {
1930 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1931 }
1932 if ((ss & 3) != dpl) {
1933 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1934 }
1935 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1936 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1937 }
1938 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1939 if (ss_dpl != dpl) {
1940 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1941 }
1942 if (!(ss_e2 & DESC_S_MASK) ||
1943 (ss_e2 & DESC_CS_MASK) ||
1944 !(ss_e2 & DESC_W_MASK)) {
1945 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1946 }
1947 if (!(ss_e2 & DESC_P_MASK)) {
1948 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1949 }
1951 /* push_size = ((param_count * 2) + 8) << shift; */
1953 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1954 old_ssp = env->segs[R_SS].base;
1956 sp_mask = get_sp_mask(ss_e2);
1957 ssp = get_seg_base(ss_e1, ss_e2);
1958 if (shift) {
1959 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1960 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1961 for (i = param_count - 1; i >= 0; i--) {
1962 val = cpu_ldl_kernel_ra(env, old_ssp +
1963 ((env->regs[R_ESP] + i * 4) &
1964 old_sp_mask), GETPC());
1965 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1966 }
1967 } else {
1968 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1969 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1970 for (i = param_count - 1; i >= 0; i--) {
1971 val = cpu_lduw_kernel_ra(env, old_ssp +
1972 ((env->regs[R_ESP] + i * 2) &
1973 old_sp_mask), GETPC());
1974 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1975 }
1976 }
1977 new_stack = 1;
1978 } else {
1979 /* to same privilege */
1980 sp = env->regs[R_ESP];
1981 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1982 ssp = env->segs[R_SS].base;
1983 /* push_size = (4 << shift); */
1984 new_stack = 0;
1985 }
1987 if (shift) {
1988 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1989 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1990 } else {
1991 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1992 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1993 }
1995 /* from this point, not restartable */
1997 if (new_stack) {
1998 ss = (ss & ~3) | dpl;
1999 cpu_x86_load_seg_cache(env, R_SS, ss,
2000 ssp,
2001 get_seg_limit(ss_e1, ss_e2),
2002 ss_e2);
2003 }
2005 selector = (selector & ~3) | dpl;
2006 cpu_x86_load_seg_cache(env, R_CS, selector,
2007 get_seg_base(e1, e2),
2008 get_seg_limit(e1, e2),
2009 e2);
2010 SET_ESP(sp, sp_mask);
2011 env->eip = offset;
2012 }
2013 }
2015 /* real and vm86 mode iret */
2016 void helper_iret_real(CPUX86State *env, int shift)
2017 {
2018 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2019 target_ulong ssp;
2020 int eflags_mask;
2022 sp_mask = 0xffff; /* XXXX: use SS segment size? */
2023 sp = env->regs[R_ESP];
2024 ssp = env->segs[R_SS].base;
2025 if (shift == 1) {
2026 /* 32 bits */
2027 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2028 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2029 new_cs &= 0xffff;
2030 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2031 } else {
2032 /* 16 bits */
2033 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2034 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2035 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2036 }
2037 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2038 env->segs[R_CS].selector = new_cs;
2039 env->segs[R_CS].base = (new_cs << 4);
2040 env->eip = new_eip;
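/* In vm86 mode IRET must not be able to modify IOPL, so the writable-flag
   mask below is narrower than in plain real mode. */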
2041 if (env->eflags & VM_MASK) {
2042 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2043 NT_MASK;
2044 } else {
2045 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2046 RF_MASK | NT_MASK;
2047 }
2048 if (shift == 0) {
2049 eflags_mask &= 0xffff;
2050 }
2051 cpu_load_eflags(env, new_eflags, eflags_mask);
2052 env->hflags2 &= ~HF2_NMI_MASK;
2053 }
2055 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2056 {
2057 int dpl;
2058 uint32_t e2;
2060 /* XXX: on x86_64, we do not want to nullify FS and GS because
2061 they may still contain a valid base. I would be interested to
2062 know how a real x86_64 CPU behaves */
2063 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2064 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2065 return;
2066 }
2068 e2 = env->segs[seg_reg].flags;
2069 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
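/* Data and non-conforming code segments that are more privileged than the
   new CPL are nulled, so code returning to an outer level cannot keep using
   them. */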
2070 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2071 /* data or non conforming code segment */
2072 if (dpl < cpl) {
2073 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2074 }
2075 }
2076 }
2078 /* protected mode iret */
2079 static inline void helper_ret_protected(CPUX86State *env, int shift,
2080 int is_iret, int addend,
2081 uintptr_t retaddr)
2082 {
2083 uint32_t new_cs, new_eflags, new_ss;
2084 uint32_t new_es, new_ds, new_fs, new_gs;
2085 uint32_t e1, e2, ss_e1, ss_e2;
2086 int cpl, dpl, rpl, eflags_mask, iopl;
2087 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
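/* 'shift' encodes the operand size of the return frame: 0 pops 16-bit
   values, 1 pops 32-bit values and, on x86_64, 2 pops 64-bit values. */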
2089 #ifdef TARGET_X86_64
2090 if (shift == 2) {
2091 sp_mask = -1;
2092 } else
2093 #endif
2094 {
2095 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2096 }
2097 sp = env->regs[R_ESP];
2098 ssp = env->segs[R_SS].base;
2099 new_eflags = 0; /* avoid warning */
2100 #ifdef TARGET_X86_64
2101 if (shift == 2) {
2102 POPQ_RA(sp, new_eip, retaddr);
2103 POPQ_RA(sp, new_cs, retaddr);
2104 new_cs &= 0xffff;
2105 if (is_iret) {
2106 POPQ_RA(sp, new_eflags, retaddr);
2107 }
2108 } else
2109 #endif
2110 {
2111 if (shift == 1) {
2112 /* 32 bits */
2113 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2114 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2115 new_cs &= 0xffff;
2116 if (is_iret) {
2117 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2118 if (new_eflags & VM_MASK) {
2119 goto return_to_vm86;
2120 }
2121 }
2122 } else {
2123 /* 16 bits */
2124 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2125 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2126 if (is_iret) {
2127 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2128 }
2129 }
2130 }
2131 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2132 new_cs, new_eip, shift, addend);
2133 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2134 if ((new_cs & 0xfffc) == 0) {
2135 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2136 }
2137 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2138 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2139 }
2140 if (!(e2 & DESC_S_MASK) ||
2141 !(e2 & DESC_CS_MASK)) {
2142 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2143 }
2144 cpl = env->hflags & HF_CPL_MASK;
2145 rpl = new_cs & 3;
2146 if (rpl < cpl) {
2147 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2148 }
2149 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2150 if (e2 & DESC_C_MASK) {
2151 if (dpl > rpl) {
2152 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2153 }
2154 } else {
2155 if (dpl != rpl) {
2156 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2157 }
2158 }
2159 if (!(e2 & DESC_P_MASK)) {
2160 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2161 }
2163 sp += addend;
2164 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2165 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2166 /* return to same privilege level */
2167 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2168 get_seg_base(e1, e2),
2169 get_seg_limit(e1, e2),
2170 e2);
2171 } else {
2172 /* return to different privilege level */
2173 #ifdef TARGET_X86_64
2174 if (shift == 2) {
2175 POPQ_RA(sp, new_esp, retaddr);
2176 POPQ_RA(sp, new_ss, retaddr);
2177 new_ss &= 0xffff;
2178 } else
2179 #endif
2180 {
2181 if (shift == 1) {
2182 /* 32 bits */
2183 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2184 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2185 new_ss &= 0xffff;
2186 } else {
2187 /* 16 bits */
2188 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2189 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2190 }
2191 }
2192 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2193 new_ss, new_esp);
2194 if ((new_ss & 0xfffc) == 0) {
2195 #ifdef TARGET_X86_64
2196 /* NULL ss is allowed in long mode if cpl != 3 */
2197 /* XXX: test CS64? */
2198 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2199 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2200 0, 0xffffffff,
2201 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2202 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2203 DESC_W_MASK | DESC_A_MASK);
2204 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2205 } else
2206 #endif
2207 {
2208 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2209 }
2210 } else {
2211 if ((new_ss & 3) != rpl) {
2212 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2213 }
2214 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2215 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2216 }
2217 if (!(ss_e2 & DESC_S_MASK) ||
2218 (ss_e2 & DESC_CS_MASK) ||
2219 !(ss_e2 & DESC_W_MASK)) {
2220 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2221 }
2222 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2223 if (dpl != rpl) {
2224 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2225 }
2226 if (!(ss_e2 & DESC_P_MASK)) {
2227 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2228 }
2229 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2230 get_seg_base(ss_e1, ss_e2),
2231 get_seg_limit(ss_e1, ss_e2),
2232 ss_e2);
2233 }
2235 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2236 get_seg_base(e1, e2),
2237 get_seg_limit(e1, e2),
2238 e2);
2239 sp = new_esp;
2240 #ifdef TARGET_X86_64
2241 if (env->hflags & HF_CS64_MASK) {
2242 sp_mask = -1;
2243 } else
2244 #endif
2245 {
2246 sp_mask = get_sp_mask(ss_e2);
2247 }
2249 /* validate data segments */
2250 validate_seg(env, R_ES, rpl);
2251 validate_seg(env, R_DS, rpl);
2252 validate_seg(env, R_FS, rpl);
2253 validate_seg(env, R_GS, rpl);
2255 sp += addend;
2256 }
2257 SET_ESP(sp, sp_mask);
2258 env->eip = new_eip;
2259 if (is_iret) {
2260 /* NOTE: 'cpl' is the _old_ CPL */
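/* Only a CPL 0 return may rewrite IOPL, and IF may change only when the old
   CPL is at most IOPL; the remaining flags in eflags_mask are always
   writable. */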
2261 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2262 if (cpl == 0) {
2263 eflags_mask |= IOPL_MASK;
2265 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2266 if (cpl <= iopl) {
2267 eflags_mask |= IF_MASK;
2268 }
2269 if (shift == 0) {
2270 eflags_mask &= 0xffff;
2271 }
2272 cpu_load_eflags(env, new_eflags, eflags_mask);
2273 }
2274 return;
2276 return_to_vm86:
2277 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2278 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2279 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2280 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2281 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2282 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
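/* Beyond EIP/CS/EFLAGS, the 32-bit IRET frame for a return to vm86 carries
   the new ESP, SS, ES, DS, FS and GS, popped above in that order. */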
2284 /* modify processor state */
2285 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2286 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2287 VIP_MASK);
2288 load_seg_vm(env, R_CS, new_cs & 0xffff);
2289 load_seg_vm(env, R_SS, new_ss & 0xffff);
2290 load_seg_vm(env, R_ES, new_es & 0xffff);
2291 load_seg_vm(env, R_DS, new_ds & 0xffff);
2292 load_seg_vm(env, R_FS, new_fs & 0xffff);
2293 load_seg_vm(env, R_GS, new_gs & 0xffff);
2295 env->eip = new_eip & 0xffff;
2296 env->regs[R_ESP] = new_esp;
2297 }
2299 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2300 {
2301 int tss_selector, type;
2302 uint32_t e1, e2;
2304 /* specific case for TSS */
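/* With EFLAGS.NT set, IRET performs a task return: the previous task's
   selector is the back link stored at offset 0 of the current TSS. Hardware
   task switching does not exist in long mode, hence the #GP below. */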
2305 if (env->eflags & NT_MASK) {
2306 #ifdef TARGET_X86_64
2307 if (env->hflags & HF_LMA_MASK) {
2308 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2309 }
2310 #endif
2311 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2312 if (tss_selector & 4) {
2313 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2314 }
2315 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2316 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2317 }
2318 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2319 /* NOTE: we check both segment and busy TSS */
2320 if (type != 3) {
2321 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2322 }
2323 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2324 } else {
2325 helper_ret_protected(env, shift, 1, 0, GETPC());
2326 }
2327 env->hflags2 &= ~HF2_NMI_MASK;
2328 }
2330 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2331 {
2332 helper_ret_protected(env, shift, 0, addend, GETPC());
2333 }
2335 void helper_sysenter(CPUX86State *env)
2336 {
2337 if (env->sysenter_cs == 0) {
2338 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2339 }
2340 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
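/* SYSENTER derives flat CS/SS from IA32_SYSENTER_CS (SS = CS + 8) and loads
   ESP/EIP from the SYSENTER_ESP/EIP MSRs; no return address is saved, the
   SYSEXIT convention (return ESP/EIP in ECX/EDX) has to provide it. */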
2342 #ifdef TARGET_X86_64
2343 if (env->hflags & HF_LMA_MASK) {
2344 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2345 0, 0xffffffff,
2346 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2347 DESC_S_MASK |
2348 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2349 DESC_L_MASK);
2350 } else
2351 #endif
2352 {
2353 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2354 0, 0xffffffff,
2355 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2356 DESC_S_MASK |
2357 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2358 }
2359 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2360 0, 0xffffffff,
2361 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2362 DESC_S_MASK |
2363 DESC_W_MASK | DESC_A_MASK);
2364 env->regs[R_ESP] = env->sysenter_esp;
2365 env->eip = env->sysenter_eip;
2366 }
2368 void helper_sysexit(CPUX86State *env, int dflag)
2369 {
2370 int cpl;
2372 cpl = env->hflags & HF_CPL_MASK;
2373 if (env->sysenter_cs == 0 || cpl != 0) {
2374 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2375 }
2376 #ifdef TARGET_X86_64
2377 if (dflag == 2) {
2378 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2379 3, 0, 0xffffffff,
2380 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2381 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2382 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2383 DESC_L_MASK);
2384 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2385 3, 0, 0xffffffff,
2386 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2387 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2388 DESC_W_MASK | DESC_A_MASK);
2389 } else
2390 #endif
2391 {
2392 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2393 3, 0, 0xffffffff,
2394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2395 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2396 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2397 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2398 3, 0, 0xffffffff,
2399 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2400 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2401 DESC_W_MASK | DESC_A_MASK);
2402 }
2403 env->regs[R_ESP] = env->regs[R_ECX];
2404 env->eip = env->regs[R_EDX];
2405 }
2407 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2408 {
2409 unsigned int limit;
2410 uint32_t e1, e2, eflags, selector;
2411 int rpl, dpl, cpl, type;
2413 selector = selector1 & 0xffff;
2414 eflags = cpu_cc_compute_all(env, CC_OP);
2415 if ((selector & 0xfffc) == 0) {
2416 goto fail;
2417 }
2418 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2419 goto fail;
2420 }
2421 rpl = selector & 3;
2422 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2423 cpl = env->hflags & HF_CPL_MASK;
2424 if (e2 & DESC_S_MASK) {
2425 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2426 /* conforming */
2427 } else {
2428 if (dpl < cpl || dpl < rpl) {
2429 goto fail;
2430 }
2431 }
2432 } else {
2433 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
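/* Only system descriptors with a meaningful limit pass LSL: 1/3 = available/
   busy 286 TSS, 2 = LDT, 9/11 = available/busy 386 TSS. */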
2434 switch (type) {
2435 case 1:
2436 case 2:
2437 case 3:
2438 case 9:
2439 case 11:
2440 break;
2441 default:
2442 goto fail;
2443 }
2444 if (dpl < cpl || dpl < rpl) {
2445 fail:
2446 CC_SRC = eflags & ~CC_Z;
2447 return 0;
2448 }
2449 }
2450 limit = get_seg_limit(e1, e2);
2451 CC_SRC = eflags | CC_Z;
2452 return limit;
2453 }
2455 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2456 {
2457 uint32_t e1, e2, eflags, selector;
2458 int rpl, dpl, cpl, type;
2460 selector = selector1 & 0xffff;
2461 eflags = cpu_cc_compute_all(env, CC_OP);
2462 if ((selector & 0xfffc) == 0) {
2463 goto fail;
2464 }
2465 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2466 goto fail;
2467 }
2468 rpl = selector & 3;
2469 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2470 cpl = env->hflags & HF_CPL_MASK;
2471 if (e2 & DESC_S_MASK) {
2472 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2473 /* conforming */
2474 } else {
2475 if (dpl < cpl || dpl < rpl) {
2476 goto fail;
2477 }
2478 }
2479 } else {
2480 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
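/* LAR additionally accepts gate types: 4 = 286 call gate, 5 = task gate,
   12 = 386 call gate, on top of the TSS/LDT types allowed for LSL. */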
2481 switch (type) {
2482 case 1:
2483 case 2:
2484 case 3:
2485 case 4:
2486 case 5:
2487 case 9:
2488 case 11:
2489 case 12:
2490 break;
2491 default:
2492 goto fail;
2493 }
2494 if (dpl < cpl || dpl < rpl) {
2495 fail:
2496 CC_SRC = eflags & ~CC_Z;
2497 return 0;
2498 }
2499 }
2500 CC_SRC = eflags | CC_Z;
2501 return e2 & 0x00f0ff00;
2502 }
2504 void helper_verr(CPUX86State *env, target_ulong selector1)
2505 {
2506 uint32_t e1, e2, eflags, selector;
2507 int rpl, dpl, cpl;
2509 selector = selector1 & 0xffff;
2510 eflags = cpu_cc_compute_all(env, CC_OP);
2511 if ((selector & 0xfffc) == 0) {
2512 goto fail;
2513 }
2514 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2515 goto fail;
2516 }
2517 if (!(e2 & DESC_S_MASK)) {
2518 goto fail;
2519 }
2520 rpl = selector & 3;
2521 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2522 cpl = env->hflags & HF_CPL_MASK;
2523 if (e2 & DESC_CS_MASK) {
2524 if (!(e2 & DESC_R_MASK)) {
2525 goto fail;
2526 }
2527 if (!(e2 & DESC_C_MASK)) {
2528 if (dpl < cpl || dpl < rpl) {
2529 goto fail;
2530 }
2531 }
2532 } else {
2533 if (dpl < cpl || dpl < rpl) {
2534 fail:
2535 CC_SRC = eflags & ~CC_Z;
2536 return;
2537 }
2538 }
2539 CC_SRC = eflags | CC_Z;
2540 }
2542 void helper_verw(CPUX86State *env, target_ulong selector1)
2543 {
2544 uint32_t e1, e2, eflags, selector;
2545 int rpl, dpl, cpl;
2547 selector = selector1 & 0xffff;
2548 eflags = cpu_cc_compute_all(env, CC_OP);
2549 if ((selector & 0xfffc) == 0) {
2550 goto fail;
2551 }
2552 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2553 goto fail;
2554 }
2555 if (!(e2 & DESC_S_MASK)) {
2556 goto fail;
2557 }
2558 rpl = selector & 3;
2559 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2560 cpl = env->hflags & HF_CPL_MASK;
2561 if (e2 & DESC_CS_MASK) {
2562 goto fail;
2563 } else {
2564 if (dpl < cpl || dpl < rpl) {
2565 goto fail;
2566 }
2567 if (!(e2 & DESC_W_MASK)) {
2568 fail:
2569 CC_SRC = eflags & ~CC_Z;
2570 return;
2571 }
2572 }
2573 CC_SRC = eflags | CC_Z;
2574 }
2576 #if defined(CONFIG_USER_ONLY)
2577 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2578 {
2579 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2580 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2581 selector &= 0xffff;
2582 cpu_x86_load_seg_cache(env, seg_reg, selector,
2583 (selector << 4), 0xffff,
2584 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2585 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2586 } else {
2587 helper_load_seg(env, seg_reg, selector);
2588 }
2589 }
2590 #endif
2592 /* check if Port I/O is allowed in TSS */
2593 static inline void check_io(CPUX86State *env, int addr, int size,
2594 uintptr_t retaddr)
2595 {
2596 int io_offset, val, mask;
2598 /* TSS must be a valid 32 bit one */
2599 if (!(env->tr.flags & DESC_P_MASK) ||
2600 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2601 env->tr.limit < 103) {
2602 goto fail;
2603 }
2604 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2605 io_offset += (addr >> 3);
2606 /* Note: the check needs two bytes */
2607 if ((io_offset + 1) > env->tr.limit) {
2608 goto fail;
2609 }
2610 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2611 val >>= (addr & 7);
2612 mask = (1 << size) - 1;
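/* Example: a 2-byte access to port 0x3fd needs bitmap bits 0x3fd and 0x3fe
   clear; the word read at tr.base + io_offset + (0x3fd >> 3) is shifted
   right by (0x3fd & 7) and tested against mask 0x3. */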
2613 /* all bits must be zero to allow the I/O */
2614 if ((val & mask) != 0) {
2615 fail:
2616 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2617 }
2618 }
2620 void helper_check_iob(CPUX86State *env, uint32_t t0)
2621 {
2622 check_io(env, t0, 1, GETPC());
2623 }
2625 void helper_check_iow(CPUX86State *env, uint32_t t0)
2626 {
2627 check_io(env, t0, 2, GETPC());
2628 }
2630 void helper_check_iol(CPUX86State *env, uint32_t t0)
2631 {
2632 check_io(env, t0, 4, GETPC());
2633 }