vfio/pci: Rework RTL8168 quirk
[qemu/ar7.git] / target-i386 / seg_helper.c
blob1a3a2e7573436532782a5a0173c06499b5cd7541
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
39 #define DATA_SIZE 1
40 #include "exec/cpu_ldst_useronly_template.h"
42 #define DATA_SIZE 2
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 4
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 8
49 #include "exec/cpu_ldst_useronly_template.h"
50 #undef MEMSUFFIX
51 #else
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
54 #define DATA_SIZE 1
55 #include "exec/cpu_ldst_template.h"
57 #define DATA_SIZE 2
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 4
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 8
64 #include "exec/cpu_ldst_template.h"
65 #undef CPU_MMU_INDEX
66 #undef MEMSUFFIX
67 #endif
69 /* return non zero if error */
70 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
71 uint32_t *e2_ptr, int selector,
72 uintptr_t retaddr)
74 SegmentCache *dt;
75 int index;
76 target_ulong ptr;
78 if (selector & 0x4) {
79 dt = &env->ldt;
80 } else {
81 dt = &env->gdt;
83 index = selector & ~7;
84 if ((index + 7) > dt->limit) {
85 return -1;
87 ptr = dt->base + index;
88 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
89 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
90 return 0;
93 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
94 uint32_t *e2_ptr, int selector)
96 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
99 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
101 unsigned int limit;
103 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
104 if (e2 & DESC_G_MASK) {
105 limit = (limit << 12) | 0xfff;
107 return limit;
110 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
112 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
115 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
116 uint32_t e2)
118 sc->base = get_seg_base(e1, e2);
119 sc->limit = get_seg_limit(e1, e2);
120 sc->flags = e2;
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
126 selector &= 0xffff;
128 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
129 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
130 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
133 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
134 uint32_t *esp_ptr, int dpl,
135 uintptr_t retaddr)
137 X86CPU *cpu = x86_env_get_cpu(env);
138 int type, index, shift;
140 #if 0
142 int i;
143 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
144 for (i = 0; i < env->tr.limit; i++) {
145 printf("%02x ", env->tr.base[i]);
146 if ((i & 7) == 7) {
147 printf("\n");
150 printf("\n");
152 #endif
154 if (!(env->tr.flags & DESC_P_MASK)) {
155 cpu_abort(CPU(cpu), "invalid tss");
157 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
158 if ((type & 7) != 1) {
159 cpu_abort(CPU(cpu), "invalid tss type");
161 shift = type >> 3;
162 index = (dpl * 4 + 2) << shift;
163 if (index + (4 << shift) - 1 > env->tr.limit) {
164 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
166 if (shift == 0) {
167 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
168 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
169 } else {
170 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
175 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
176 uintptr_t retaddr)
178 uint32_t e1, e2;
179 int rpl, dpl;
181 if ((selector & 0xfffc) != 0) {
182 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
183 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
185 if (!(e2 & DESC_S_MASK)) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 rpl = selector & 3;
189 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
190 if (seg_reg == R_CS) {
191 if (!(e2 & DESC_CS_MASK)) {
192 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
194 if (dpl != rpl) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 } else if (seg_reg == R_SS) {
198 /* SS must be writable data */
199 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
200 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
202 if (dpl != cpl || dpl != rpl) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 } else {
206 /* not readable code */
207 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
208 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
210 /* if data or non conforming code, checks the rights */
211 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
212 if (dpl < cpl || dpl < rpl) {
213 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
217 if (!(e2 & DESC_P_MASK)) {
218 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
220 cpu_x86_load_seg_cache(env, seg_reg, selector,
221 get_seg_base(e1, e2),
222 get_seg_limit(e1, e2),
223 e2);
224 } else {
225 if (seg_reg == R_SS || seg_reg == R_CS) {
226 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231 #define SWITCH_TSS_JMP 0
232 #define SWITCH_TSS_IRET 1
233 #define SWITCH_TSS_CALL 2
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State *env, int tss_selector,
237 uint32_t e1, uint32_t e2, int source,
238 uint32_t next_eip, uintptr_t retaddr)
240 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
241 target_ulong tss_base;
242 uint32_t new_regs[8], new_segs[6];
243 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
244 uint32_t old_eflags, eflags_mask;
245 SegmentCache *dt;
246 int index;
247 target_ulong ptr;
249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
251 source);
253 /* if task gate, we read the TSS segment and we load it */
254 if (type == 5) {
255 if (!(e2 & DESC_P_MASK)) {
256 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
258 tss_selector = e1 >> 16;
259 if (tss_selector & 4) {
260 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
262 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
263 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
265 if (e2 & DESC_S_MASK) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
269 if ((type & 7) != 1) {
270 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
274 if (!(e2 & DESC_P_MASK)) {
275 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
278 if (type & 8) {
279 tss_limit_max = 103;
280 } else {
281 tss_limit_max = 43;
283 tss_limit = get_seg_limit(e1, e2);
284 tss_base = get_seg_base(e1, e2);
285 if ((tss_selector & 4) != 0 ||
286 tss_limit < tss_limit_max) {
287 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
289 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
290 if (old_type & 8) {
291 old_tss_limit_max = 103;
292 } else {
293 old_tss_limit_max = 43;
296 /* read all the registers from the new TSS */
297 if (type & 8) {
298 /* 32 bit */
299 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
300 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
301 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
302 for (i = 0; i < 8; i++) {
303 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
304 retaddr);
306 for (i = 0; i < 6; i++) {
307 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
308 retaddr);
310 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
311 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
312 } else {
313 /* 16 bit */
314 new_cr3 = 0;
315 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
316 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
317 for (i = 0; i < 8; i++) {
318 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
319 retaddr) | 0xffff0000;
321 for (i = 0; i < 4; i++) {
322 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
323 retaddr);
325 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
326 new_segs[R_FS] = 0;
327 new_segs[R_GS] = 0;
328 new_trap = 0;
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
333 (void)new_trap;
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses before */
337 /* XXX: it can still fail in some cases, so a bigger hack is
338 necessary to valid the TLB after having done the accesses */
340 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
341 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
342 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
345 /* clear busy bit (it is restartable) */
346 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
347 target_ulong ptr;
348 uint32_t e2;
350 ptr = env->gdt.base + (env->tr.selector & ~7);
351 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
352 e2 &= ~DESC_TSS_BUSY_MASK;
353 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
355 old_eflags = cpu_compute_eflags(env);
356 if (source == SWITCH_TSS_IRET) {
357 old_eflags &= ~NT_MASK;
360 /* save the current state in the old TSS */
361 if (type & 8) {
362 /* 32 bit */
363 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
364 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
373 for (i = 0; i < 6; i++) {
374 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
375 env->segs[i].selector, retaddr);
377 } else {
378 /* 16 bit */
379 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
380 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
389 for (i = 0; i < 4; i++) {
390 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
391 env->segs[i].selector, retaddr);
395 /* now if an exception occurs, it will occurs in the next task
396 context */
398 if (source == SWITCH_TSS_CALL) {
399 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
400 new_eflags |= NT_MASK;
403 /* set busy bit */
404 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
405 target_ulong ptr;
406 uint32_t e2;
408 ptr = env->gdt.base + (tss_selector & ~7);
409 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
410 e2 |= DESC_TSS_BUSY_MASK;
411 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env->cr[0] |= CR0_TS_MASK;
417 env->hflags |= HF_TS_MASK;
418 env->tr.selector = tss_selector;
419 env->tr.base = tss_base;
420 env->tr.limit = tss_limit;
421 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
423 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
424 cpu_x86_update_cr3(env, new_cr3);
427 /* load all registers without an exception, then reload them with
428 possible exception */
429 env->eip = new_eip;
430 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
431 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
432 if (!(type & 8)) {
433 eflags_mask &= 0xffff;
435 cpu_load_eflags(env, new_eflags, eflags_mask);
436 /* XXX: what to do in 16 bit case? */
437 env->regs[R_EAX] = new_regs[0];
438 env->regs[R_ECX] = new_regs[1];
439 env->regs[R_EDX] = new_regs[2];
440 env->regs[R_EBX] = new_regs[3];
441 env->regs[R_ESP] = new_regs[4];
442 env->regs[R_EBP] = new_regs[5];
443 env->regs[R_ESI] = new_regs[6];
444 env->regs[R_EDI] = new_regs[7];
445 if (new_eflags & VM_MASK) {
446 for (i = 0; i < 6; i++) {
447 load_seg_vm(env, i, new_segs[i]);
449 } else {
450 /* first just selectors as the rest may trigger exceptions */
451 for (i = 0; i < 6; i++) {
452 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
456 env->ldt.selector = new_ldt & ~4;
457 env->ldt.base = 0;
458 env->ldt.limit = 0;
459 env->ldt.flags = 0;
461 /* load the LDT */
462 if (new_ldt & 4) {
463 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
466 if ((new_ldt & 0xfffc) != 0) {
467 dt = &env->gdt;
468 index = new_ldt & ~7;
469 if ((index + 7) > dt->limit) {
470 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
472 ptr = dt->base + index;
473 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
474 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
475 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
476 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
478 if (!(e2 & DESC_P_MASK)) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 int cpl = new_segs[R_CS] & 3;
487 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
488 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
489 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
490 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
491 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
492 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip > env->segs[R_CS].limit) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
504 for (i = 0; i < DR7_MAX_BP; i++) {
505 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
506 !hw_global_breakpoint_enabled(env->dr[7], i)) {
507 hw_breakpoint_remove(env, i);
510 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
512 #endif
515 static void switch_tss(CPUX86State *env, int tss_selector,
516 uint32_t e1, uint32_t e2, int source,
517 uint32_t next_eip)
519 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
522 static inline unsigned int get_sp_mask(unsigned int e2)
524 if (e2 & DESC_B_MASK) {
525 return 0xffffffff;
526 } else {
527 return 0xffff;
531 static int exception_has_error_code(int intno)
533 switch (intno) {
534 case 8:
535 case 10:
536 case 11:
537 case 12:
538 case 13:
539 case 14:
540 case 17:
541 return 1;
543 return 0;
546 #ifdef TARGET_X86_64
547 #define SET_ESP(val, sp_mask) \
548 do { \
549 if ((sp_mask) == 0xffff) { \
550 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
551 ((val) & 0xffff); \
552 } else if ((sp_mask) == 0xffffffffLL) { \
553 env->regs[R_ESP] = (uint32_t)(val); \
554 } else { \
555 env->regs[R_ESP] = (val); \
557 } while (0)
558 #else
559 #define SET_ESP(val, sp_mask) \
560 do { \
561 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
562 ((val) & (sp_mask)); \
563 } while (0)
564 #endif
566 /* in 64-bit machines, this can overflow. So this segment addition macro
567 * can be used to trim the value to 32-bit whenever needed */
568 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
570 /* XXX: add a is_user flag to have proper security support */
571 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
573 sp -= 2; \
574 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
577 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
579 sp -= 4; \
580 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
583 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
585 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
586 sp += 2; \
589 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
591 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
592 sp += 4; \
595 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
596 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
597 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
598 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
600 /* protected mode interrupt */
601 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
602 int error_code, unsigned int next_eip,
603 int is_hw)
605 SegmentCache *dt;
606 target_ulong ptr, ssp;
607 int type, dpl, selector, ss_dpl, cpl;
608 int has_error_code, new_stack, shift;
609 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
610 uint32_t old_eip, sp_mask;
611 int vm86 = env->eflags & VM_MASK;
613 has_error_code = 0;
614 if (!is_int && !is_hw) {
615 has_error_code = exception_has_error_code(intno);
617 if (is_int) {
618 old_eip = next_eip;
619 } else {
620 old_eip = env->eip;
623 dt = &env->idt;
624 if (intno * 8 + 7 > dt->limit) {
625 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
627 ptr = dt->base + intno * 8;
628 e1 = cpu_ldl_kernel(env, ptr);
629 e2 = cpu_ldl_kernel(env, ptr + 4);
630 /* check gate type */
631 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
632 switch (type) {
633 case 5: /* task gate */
634 /* must do that check here to return the correct error code */
635 if (!(e2 & DESC_P_MASK)) {
636 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
638 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
639 if (has_error_code) {
640 int type;
641 uint32_t mask;
643 /* push the error code */
644 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
645 shift = type >> 3;
646 if (env->segs[R_SS].flags & DESC_B_MASK) {
647 mask = 0xffffffff;
648 } else {
649 mask = 0xffff;
651 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
652 ssp = env->segs[R_SS].base + esp;
653 if (shift) {
654 cpu_stl_kernel(env, ssp, error_code);
655 } else {
656 cpu_stw_kernel(env, ssp, error_code);
658 SET_ESP(esp, mask);
660 return;
661 case 6: /* 286 interrupt gate */
662 case 7: /* 286 trap gate */
663 case 14: /* 386 interrupt gate */
664 case 15: /* 386 trap gate */
665 break;
666 default:
667 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
668 break;
670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
671 cpl = env->hflags & HF_CPL_MASK;
672 /* check privilege if software int */
673 if (is_int && dpl < cpl) {
674 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK)) {
678 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
680 selector = e1 >> 16;
681 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
682 if ((selector & 0xfffc) == 0) {
683 raise_exception_err(env, EXCP0D_GPF, 0);
685 if (load_segment(env, &e1, &e2, selector) != 0) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
692 if (dpl > cpl) {
693 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
695 if (!(e2 & DESC_P_MASK)) {
696 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
698 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
699 /* to inner privilege */
700 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
701 if ((ss & 0xfffc) == 0) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if ((ss & 3) != dpl) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
708 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
711 if (ss_dpl != dpl) {
712 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
714 if (!(ss_e2 & DESC_S_MASK) ||
715 (ss_e2 & DESC_CS_MASK) ||
716 !(ss_e2 & DESC_W_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 if (!(ss_e2 & DESC_P_MASK)) {
720 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
722 new_stack = 1;
723 sp_mask = get_sp_mask(ss_e2);
724 ssp = get_seg_base(ss_e1, ss_e2);
725 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
726 /* to same privilege */
727 if (vm86) {
728 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
730 new_stack = 0;
731 sp_mask = get_sp_mask(env->segs[R_SS].flags);
732 ssp = env->segs[R_SS].base;
733 esp = env->regs[R_ESP];
734 dpl = cpl;
735 } else {
736 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
737 new_stack = 0; /* avoid warning */
738 sp_mask = 0; /* avoid warning */
739 ssp = 0; /* avoid warning */
740 esp = 0; /* avoid warning */
743 shift = type >> 3;
745 #if 0
746 /* XXX: check that enough room is available */
747 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
748 if (vm86) {
749 push_size += 8;
751 push_size <<= shift;
752 #endif
753 if (shift == 1) {
754 if (new_stack) {
755 if (vm86) {
756 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
759 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
761 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
762 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
764 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
765 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
766 PUSHL(ssp, esp, sp_mask, old_eip);
767 if (has_error_code) {
768 PUSHL(ssp, esp, sp_mask, error_code);
770 } else {
771 if (new_stack) {
772 if (vm86) {
773 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
776 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
778 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
779 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
781 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
782 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
783 PUSHW(ssp, esp, sp_mask, old_eip);
784 if (has_error_code) {
785 PUSHW(ssp, esp, sp_mask, error_code);
789 /* interrupt gate clear IF mask */
790 if ((type & 1) == 0) {
791 env->eflags &= ~IF_MASK;
793 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
795 if (new_stack) {
796 if (vm86) {
797 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
802 ss = (ss & ~3) | dpl;
803 cpu_x86_load_seg_cache(env, R_SS, ss,
804 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
806 SET_ESP(esp, sp_mask);
808 selector = (selector & ~3) | dpl;
809 cpu_x86_load_seg_cache(env, R_CS, selector,
810 get_seg_base(e1, e2),
811 get_seg_limit(e1, e2),
812 e2);
813 env->eip = offset;
816 #ifdef TARGET_X86_64
818 #define PUSHQ_RA(sp, val, ra) \
820 sp -= 8; \
821 cpu_stq_kernel_ra(env, sp, (val), ra); \
824 #define POPQ_RA(sp, val, ra) \
826 val = cpu_ldq_kernel_ra(env, sp, ra); \
827 sp += 8; \
830 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
831 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
833 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
835 X86CPU *cpu = x86_env_get_cpu(env);
836 int index;
838 #if 0
839 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
840 env->tr.base, env->tr.limit);
841 #endif
843 if (!(env->tr.flags & DESC_P_MASK)) {
844 cpu_abort(CPU(cpu), "invalid tss");
846 index = 8 * level + 4;
847 if ((index + 7) > env->tr.limit) {
848 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
850 return cpu_ldq_kernel(env, env->tr.base + index);
853 /* 64 bit interrupt */
854 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
855 int error_code, target_ulong next_eip, int is_hw)
857 SegmentCache *dt;
858 target_ulong ptr;
859 int type, dpl, selector, cpl, ist;
860 int has_error_code, new_stack;
861 uint32_t e1, e2, e3, ss;
862 target_ulong old_eip, esp, offset;
864 has_error_code = 0;
865 if (!is_int && !is_hw) {
866 has_error_code = exception_has_error_code(intno);
868 if (is_int) {
869 old_eip = next_eip;
870 } else {
871 old_eip = env->eip;
874 dt = &env->idt;
875 if (intno * 16 + 15 > dt->limit) {
876 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = cpu_ldl_kernel(env, ptr);
880 e2 = cpu_ldl_kernel(env, ptr + 4);
881 e3 = cpu_ldl_kernel(env, ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
884 switch (type) {
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
887 break;
888 default:
889 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
890 break;
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privilege if software int */
895 if (is_int && dpl < cpl) {
896 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
898 /* check valid bit */
899 if (!(e2 & DESC_P_MASK)) {
900 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
902 selector = e1 >> 16;
903 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 ist = e2 & 7;
905 if ((selector & 0xfffc) == 0) {
906 raise_exception_err(env, EXCP0D_GPF, 0);
909 if (load_segment(env, &e1, &e2, selector) != 0) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
913 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
916 if (dpl > cpl) {
917 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
919 if (!(e2 & DESC_P_MASK)) {
920 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
922 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
923 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
925 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
926 /* to inner privilege */
927 new_stack = 1;
928 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
929 ss = 0;
930 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
931 /* to same privilege */
932 if (env->eflags & VM_MASK) {
933 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
935 new_stack = 0;
936 esp = env->regs[R_ESP];
937 dpl = cpl;
938 } else {
939 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
940 new_stack = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
943 esp &= ~0xfLL; /* align stack */
945 PUSHQ(esp, env->segs[R_SS].selector);
946 PUSHQ(esp, env->regs[R_ESP]);
947 PUSHQ(esp, cpu_compute_eflags(env));
948 PUSHQ(esp, env->segs[R_CS].selector);
949 PUSHQ(esp, old_eip);
950 if (has_error_code) {
951 PUSHQ(esp, error_code);
954 /* interrupt gate clear IF mask */
955 if ((type & 1) == 0) {
956 env->eflags &= ~IF_MASK;
958 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
960 if (new_stack) {
961 ss = 0 | dpl;
962 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
964 env->regs[R_ESP] = esp;
966 selector = (selector & ~3) | dpl;
967 cpu_x86_load_seg_cache(env, R_CS, selector,
968 get_seg_base(e1, e2),
969 get_seg_limit(e1, e2),
970 e2);
971 env->eip = offset;
973 #endif
975 #ifdef TARGET_X86_64
976 #if defined(CONFIG_USER_ONLY)
977 void helper_syscall(CPUX86State *env, int next_eip_addend)
979 CPUState *cs = CPU(x86_env_get_cpu(env));
981 cs->exception_index = EXCP_SYSCALL;
982 env->exception_next_eip = env->eip + next_eip_addend;
983 cpu_loop_exit(cs);
985 #else
986 void helper_syscall(CPUX86State *env, int next_eip_addend)
988 int selector;
990 if (!(env->efer & MSR_EFER_SCE)) {
991 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
993 selector = (env->star >> 32) & 0xffff;
994 if (env->hflags & HF_LMA_MASK) {
995 int code64;
997 env->regs[R_ECX] = env->eip + next_eip_addend;
998 env->regs[11] = cpu_compute_eflags(env);
1000 code64 = env->hflags & HF_CS64_MASK;
1002 env->eflags &= ~env->fmask;
1003 cpu_load_eflags(env, env->eflags, 0);
1004 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1005 0, 0xffffffff,
1006 DESC_G_MASK | DESC_P_MASK |
1007 DESC_S_MASK |
1008 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1009 DESC_L_MASK);
1010 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1011 0, 0xffffffff,
1012 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1013 DESC_S_MASK |
1014 DESC_W_MASK | DESC_A_MASK);
1015 if (code64) {
1016 env->eip = env->lstar;
1017 } else {
1018 env->eip = env->cstar;
1020 } else {
1021 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1023 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1024 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1025 0, 0xffffffff,
1026 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1027 DESC_S_MASK |
1028 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1029 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1030 0, 0xffffffff,
1031 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1032 DESC_S_MASK |
1033 DESC_W_MASK | DESC_A_MASK);
1034 env->eip = (uint32_t)env->star;
1037 #endif
1038 #endif
1040 #ifdef TARGET_X86_64
1041 void helper_sysret(CPUX86State *env, int dflag)
1043 int cpl, selector;
1045 if (!(env->efer & MSR_EFER_SCE)) {
1046 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1048 cpl = env->hflags & HF_CPL_MASK;
1049 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1050 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1052 selector = (env->star >> 48) & 0xffff;
1053 if (env->hflags & HF_LMA_MASK) {
1054 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1055 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1056 NT_MASK);
1057 if (dflag == 2) {
1058 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1059 0, 0xffffffff,
1060 DESC_G_MASK | DESC_P_MASK |
1061 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1062 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1063 DESC_L_MASK);
1064 env->eip = env->regs[R_ECX];
1065 } else {
1066 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1067 0, 0xffffffff,
1068 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1069 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1070 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1071 env->eip = (uint32_t)env->regs[R_ECX];
1073 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1074 0, 0xffffffff,
1075 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1076 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1077 DESC_W_MASK | DESC_A_MASK);
1078 } else {
1079 env->eflags |= IF_MASK;
1080 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1081 0, 0xffffffff,
1082 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1083 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1084 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1085 env->eip = (uint32_t)env->regs[R_ECX];
1086 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1087 0, 0xffffffff,
1088 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1089 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1090 DESC_W_MASK | DESC_A_MASK);
1093 #endif
1095 /* real mode interrupt */
1096 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1097 int error_code, unsigned int next_eip)
1099 SegmentCache *dt;
1100 target_ulong ptr, ssp;
1101 int selector;
1102 uint32_t offset, esp;
1103 uint32_t old_cs, old_eip;
1105 /* real mode (simpler!) */
1106 dt = &env->idt;
1107 if (intno * 4 + 3 > dt->limit) {
1108 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1110 ptr = dt->base + intno * 4;
1111 offset = cpu_lduw_kernel(env, ptr);
1112 selector = cpu_lduw_kernel(env, ptr + 2);
1113 esp = env->regs[R_ESP];
1114 ssp = env->segs[R_SS].base;
1115 if (is_int) {
1116 old_eip = next_eip;
1117 } else {
1118 old_eip = env->eip;
1120 old_cs = env->segs[R_CS].selector;
1121 /* XXX: use SS segment size? */
1122 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1123 PUSHW(ssp, esp, 0xffff, old_cs);
1124 PUSHW(ssp, esp, 0xffff, old_eip);
1126 /* update processor state */
1127 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1128 env->eip = offset;
1129 env->segs[R_CS].selector = selector;
1130 env->segs[R_CS].base = (selector << 4);
1131 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1134 #if defined(CONFIG_USER_ONLY)
1135 /* fake user mode interrupt */
1136 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1137 int error_code, target_ulong next_eip)
1139 SegmentCache *dt;
1140 target_ulong ptr;
1141 int dpl, cpl, shift;
1142 uint32_t e2;
1144 dt = &env->idt;
1145 if (env->hflags & HF_LMA_MASK) {
1146 shift = 4;
1147 } else {
1148 shift = 3;
1150 ptr = dt->base + (intno << shift);
1151 e2 = cpu_ldl_kernel(env, ptr + 4);
1153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1154 cpl = env->hflags & HF_CPL_MASK;
1155 /* check privilege if software int */
1156 if (is_int && dpl < cpl) {
1157 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1160 /* Since we emulate only user space, we cannot do more than
1161 exiting the emulation with the suitable exception and error
1162 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1163 if (is_int || intno == EXCP_SYSCALL) {
1164 env->eip = next_eip;
1168 #else
1170 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1171 int error_code, int is_hw, int rm)
1173 CPUState *cs = CPU(x86_env_get_cpu(env));
1174 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1175 control.event_inj));
1177 if (!(event_inj & SVM_EVTINJ_VALID)) {
1178 int type;
1180 if (is_int) {
1181 type = SVM_EVTINJ_TYPE_SOFT;
1182 } else {
1183 type = SVM_EVTINJ_TYPE_EXEPT;
1185 event_inj = intno | type | SVM_EVTINJ_VALID;
1186 if (!rm && exception_has_error_code(intno)) {
1187 event_inj |= SVM_EVTINJ_VALID_ERR;
1188 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1189 control.event_inj_err),
1190 error_code);
1192 x86_stl_phys(cs,
1193 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1194 event_inj);
1197 #endif
1200 * Begin execution of an interruption. is_int is TRUE if coming from
1201 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1202 * instruction. It is only relevant if is_int is TRUE.
1204 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1205 int error_code, target_ulong next_eip, int is_hw)
1207 CPUX86State *env = &cpu->env;
1209 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1210 if ((env->cr[0] & CR0_PE_MASK)) {
1211 static int count;
1213 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1214 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1215 count, intno, error_code, is_int,
1216 env->hflags & HF_CPL_MASK,
1217 env->segs[R_CS].selector, env->eip,
1218 (int)env->segs[R_CS].base + env->eip,
1219 env->segs[R_SS].selector, env->regs[R_ESP]);
1220 if (intno == 0x0e) {
1221 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1222 } else {
1223 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1225 qemu_log("\n");
1226 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1227 #if 0
1229 int i;
1230 target_ulong ptr;
1232 qemu_log(" code=");
1233 ptr = env->segs[R_CS].base + env->eip;
1234 for (i = 0; i < 16; i++) {
1235 qemu_log(" %02x", ldub(ptr + i));
1237 qemu_log("\n");
1239 #endif
1240 count++;
1243 if (env->cr[0] & CR0_PE_MASK) {
1244 #if !defined(CONFIG_USER_ONLY)
1245 if (env->hflags & HF_SVMI_MASK) {
1246 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1248 #endif
1249 #ifdef TARGET_X86_64
1250 if (env->hflags & HF_LMA_MASK) {
1251 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1252 } else
1253 #endif
1255 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1256 is_hw);
1258 } else {
1259 #if !defined(CONFIG_USER_ONLY)
1260 if (env->hflags & HF_SVMI_MASK) {
1261 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1263 #endif
1264 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1267 #if !defined(CONFIG_USER_ONLY)
1268 if (env->hflags & HF_SVMI_MASK) {
1269 CPUState *cs = CPU(cpu);
1270 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1271 offsetof(struct vmcb,
1272 control.event_inj));
1274 x86_stl_phys(cs,
1275 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1276 event_inj & ~SVM_EVTINJ_VALID);
1278 #endif
1281 void x86_cpu_do_interrupt(CPUState *cs)
1283 X86CPU *cpu = X86_CPU(cs);
1284 CPUX86State *env = &cpu->env;
1286 #if defined(CONFIG_USER_ONLY)
1287 /* if user mode only, we simulate a fake exception
1288 which will be handled outside the cpu execution
1289 loop */
1290 do_interrupt_user(env, cs->exception_index,
1291 env->exception_is_int,
1292 env->error_code,
1293 env->exception_next_eip);
1294 /* successfully delivered */
1295 env->old_exception = -1;
1296 #else
1297 /* simulate a real cpu exception. On i386, it can
1298 trigger new exceptions, but we do not handle
1299 double or triple faults yet. */
1300 do_interrupt_all(cpu, cs->exception_index,
1301 env->exception_is_int,
1302 env->error_code,
1303 env->exception_next_eip, 0);
1304 /* successfully delivered */
1305 env->old_exception = -1;
1306 #endif
1309 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1311 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1314 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1316 X86CPU *cpu = X86_CPU(cs);
1317 CPUX86State *env = &cpu->env;
1318 bool ret = false;
1320 #if !defined(CONFIG_USER_ONLY)
1321 if (interrupt_request & CPU_INTERRUPT_POLL) {
1322 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1323 apic_poll_irq(cpu->apic_state);
1325 #endif
1326 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1327 do_cpu_sipi(cpu);
1328 } else if (env->hflags2 & HF2_GIF_MASK) {
1329 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1330 !(env->hflags & HF_SMM_MASK)) {
1331 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1332 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1333 do_smm_enter(cpu);
1334 ret = true;
1335 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1336 !(env->hflags2 & HF2_NMI_MASK)) {
1337 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1338 env->hflags2 |= HF2_NMI_MASK;
1339 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1340 ret = true;
1341 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1342 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1343 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1344 ret = true;
1345 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1346 (((env->hflags2 & HF2_VINTR_MASK) &&
1347 (env->hflags2 & HF2_HIF_MASK)) ||
1348 (!(env->hflags2 & HF2_VINTR_MASK) &&
1349 (env->eflags & IF_MASK &&
1350 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1351 int intno;
1352 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1353 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1354 CPU_INTERRUPT_VIRQ);
1355 intno = cpu_get_pic_interrupt(env);
1356 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1357 "Servicing hardware INT=0x%02x\n", intno);
1358 do_interrupt_x86_hardirq(env, intno, 1);
1359 /* ensure that no TB jump will be modified as
1360 the program flow was changed */
1361 ret = true;
1362 #if !defined(CONFIG_USER_ONLY)
1363 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1364 (env->eflags & IF_MASK) &&
1365 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1366 int intno;
1367 /* FIXME: this should respect TPR */
1368 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1369 intno = x86_ldl_phys(cs, env->vm_vmcb
1370 + offsetof(struct vmcb, control.int_vector));
1371 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1372 "Servicing virtual hardware INT=0x%02x\n", intno);
1373 do_interrupt_x86_hardirq(env, intno, 1);
1374 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1375 ret = true;
1376 #endif
1380 return ret;
1383 void helper_enter_level(CPUX86State *env, int level, int data32,
1384 target_ulong t1)
1386 target_ulong ssp;
1387 uint32_t esp_mask, esp, ebp;
1389 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1390 ssp = env->segs[R_SS].base;
1391 ebp = env->regs[R_EBP];
1392 esp = env->regs[R_ESP];
1393 if (data32) {
1394 /* 32 bit */
1395 esp -= 4;
1396 while (--level) {
1397 esp -= 4;
1398 ebp -= 4;
1399 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1400 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1401 GETPC()),
1402 GETPC());
1404 esp -= 4;
1405 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1406 } else {
1407 /* 16 bit */
1408 esp -= 2;
1409 while (--level) {
1410 esp -= 2;
1411 ebp -= 2;
1412 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1413 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1414 GETPC()),
1415 GETPC());
1417 esp -= 2;
1418 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1422 #ifdef TARGET_X86_64
1423 void helper_enter64_level(CPUX86State *env, int level, int data64,
1424 target_ulong t1)
1426 target_ulong esp, ebp;
1428 ebp = env->regs[R_EBP];
1429 esp = env->regs[R_ESP];
1431 if (data64) {
1432 /* 64 bit */
1433 esp -= 8;
1434 while (--level) {
1435 esp -= 8;
1436 ebp -= 8;
1437 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1438 GETPC());
1440 esp -= 8;
1441 cpu_stq_data_ra(env, esp, t1, GETPC());
1442 } else {
1443 /* 16 bit */
1444 esp -= 2;
1445 while (--level) {
1446 esp -= 2;
1447 ebp -= 2;
1448 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1449 GETPC());
1451 esp -= 2;
1452 cpu_stw_data_ra(env, esp, t1, GETPC());
1455 #endif
1457 void helper_lldt(CPUX86State *env, int selector)
1459 SegmentCache *dt;
1460 uint32_t e1, e2;
1461 int index, entry_limit;
1462 target_ulong ptr;
1464 selector &= 0xffff;
1465 if ((selector & 0xfffc) == 0) {
1466 /* XXX: NULL selector case: invalid LDT */
1467 env->ldt.base = 0;
1468 env->ldt.limit = 0;
1469 } else {
1470 if (selector & 0x4) {
1471 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1473 dt = &env->gdt;
1474 index = selector & ~7;
1475 #ifdef TARGET_X86_64
1476 if (env->hflags & HF_LMA_MASK) {
1477 entry_limit = 15;
1478 } else
1479 #endif
1481 entry_limit = 7;
1483 if ((index + entry_limit) > dt->limit) {
1484 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1486 ptr = dt->base + index;
1487 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1488 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1489 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1490 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1492 if (!(e2 & DESC_P_MASK)) {
1493 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1495 #ifdef TARGET_X86_64
1496 if (env->hflags & HF_LMA_MASK) {
1497 uint32_t e3;
1499 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1500 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1501 env->ldt.base |= (target_ulong)e3 << 32;
1502 } else
1503 #endif
1505 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1508 env->ldt.selector = selector;
1511 void helper_ltr(CPUX86State *env, int selector)
1513 SegmentCache *dt;
1514 uint32_t e1, e2;
1515 int index, type, entry_limit;
1516 target_ulong ptr;
1518 selector &= 0xffff;
1519 if ((selector & 0xfffc) == 0) {
1520 /* NULL selector case: invalid TR */
1521 env->tr.base = 0;
1522 env->tr.limit = 0;
1523 env->tr.flags = 0;
1524 } else {
1525 if (selector & 0x4) {
1526 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1528 dt = &env->gdt;
1529 index = selector & ~7;
1530 #ifdef TARGET_X86_64
1531 if (env->hflags & HF_LMA_MASK) {
1532 entry_limit = 15;
1533 } else
1534 #endif
1536 entry_limit = 7;
1538 if ((index + entry_limit) > dt->limit) {
1539 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1541 ptr = dt->base + index;
1542 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1543 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1544 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1545 if ((e2 & DESC_S_MASK) ||
1546 (type != 1 && type != 9)) {
1547 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1549 if (!(e2 & DESC_P_MASK)) {
1550 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1552 #ifdef TARGET_X86_64
1553 if (env->hflags & HF_LMA_MASK) {
1554 uint32_t e3, e4;
1556 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1557 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1558 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1559 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1561 load_seg_cache_raw_dt(&env->tr, e1, e2);
1562 env->tr.base |= (target_ulong)e3 << 32;
1563 } else
1564 #endif
1566 load_seg_cache_raw_dt(&env->tr, e1, e2);
1568 e2 |= DESC_TSS_BUSY_MASK;
1569 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1571 env->tr.selector = selector;
1574 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1575 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1577 uint32_t e1, e2;
1578 int cpl, dpl, rpl;
1579 SegmentCache *dt;
1580 int index;
1581 target_ulong ptr;
1583 selector &= 0xffff;
1584 cpl = env->hflags & HF_CPL_MASK;
1585 if ((selector & 0xfffc) == 0) {
1586 /* null selector case */
1587 if (seg_reg == R_SS
1588 #ifdef TARGET_X86_64
1589 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1590 #endif
1592 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1594 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1595 } else {
1597 if (selector & 0x4) {
1598 dt = &env->ldt;
1599 } else {
1600 dt = &env->gdt;
1602 index = selector & ~7;
1603 if ((index + 7) > dt->limit) {
1604 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1606 ptr = dt->base + index;
1607 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1608 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1610 if (!(e2 & DESC_S_MASK)) {
1611 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1613 rpl = selector & 3;
1614 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1615 if (seg_reg == R_SS) {
1616 /* must be writable segment */
1617 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1618 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1620 if (rpl != cpl || dpl != cpl) {
1621 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1623 } else {
1624 /* must be readable segment */
1625 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1626 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1629 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1630 /* if not conforming code, test rights */
1631 if (dpl < cpl || dpl < rpl) {
1632 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1637 if (!(e2 & DESC_P_MASK)) {
1638 if (seg_reg == R_SS) {
1639 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1640 } else {
1641 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1645 /* set the access bit if not already set */
1646 if (!(e2 & DESC_A_MASK)) {
1647 e2 |= DESC_A_MASK;
1648 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1651 cpu_x86_load_seg_cache(env, seg_reg, selector,
1652 get_seg_base(e1, e2),
1653 get_seg_limit(e1, e2),
1654 e2);
1655 #if 0
1656 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1657 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1658 #endif
1662 /* protected mode jump */
1663 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1664 target_ulong next_eip)
1666 int gate_cs, type;
1667 uint32_t e1, e2, cpl, dpl, rpl, limit;
1669 if ((new_cs & 0xfffc) == 0) {
1670 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1672 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1673 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1675 cpl = env->hflags & HF_CPL_MASK;
1676 if (e2 & DESC_S_MASK) {
1677 if (!(e2 & DESC_CS_MASK)) {
1678 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1680 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1681 if (e2 & DESC_C_MASK) {
1682 /* conforming code segment */
1683 if (dpl > cpl) {
1684 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1686 } else {
1687 /* non conforming code segment */
1688 rpl = new_cs & 3;
1689 if (rpl > cpl) {
1690 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1692 if (dpl != cpl) {
1693 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1696 if (!(e2 & DESC_P_MASK)) {
1697 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1699 limit = get_seg_limit(e1, e2);
1700 if (new_eip > limit &&
1701 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1702 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1704 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1705 get_seg_base(e1, e2), limit, e2);
1706 env->eip = new_eip;
1707 } else {
1708 /* jump to call or task gate */
1709 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1710 rpl = new_cs & 3;
1711 cpl = env->hflags & HF_CPL_MASK;
1712 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1713 switch (type) {
1714 case 1: /* 286 TSS */
1715 case 9: /* 386 TSS */
1716 case 5: /* task gate */
1717 if (dpl < cpl || dpl < rpl) {
1718 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1720 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1721 break;
1722 case 4: /* 286 call gate */
1723 case 12: /* 386 call gate */
1724 if ((dpl < cpl) || (dpl < rpl)) {
1725 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1727 if (!(e2 & DESC_P_MASK)) {
1728 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1730 gate_cs = e1 >> 16;
1731 new_eip = (e1 & 0xffff);
1732 if (type == 12) {
1733 new_eip |= (e2 & 0xffff0000);
1735 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1736 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1738 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1739 /* must be code segment */
1740 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1741 (DESC_S_MASK | DESC_CS_MASK))) {
1742 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1744 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1745 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1746 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1748 if (!(e2 & DESC_P_MASK)) {
1749 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1751 limit = get_seg_limit(e1, e2);
1752 if (new_eip > limit) {
1753 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1755 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1756 get_seg_base(e1, e2), limit, e2);
1757 env->eip = new_eip;
1758 break;
1759 default:
1760 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1761 break;
1766 /* real mode call */
1767 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1768 int shift, int next_eip)
1770 int new_eip;
1771 uint32_t esp, esp_mask;
1772 target_ulong ssp;
1774 new_eip = new_eip1;
1775 esp = env->regs[R_ESP];
1776 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1777 ssp = env->segs[R_SS].base;
1778 if (shift) {
1779 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1780 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1781 } else {
1782 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1783 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1786 SET_ESP(esp, esp_mask);
1787 env->eip = new_eip;
1788 env->segs[R_CS].selector = new_cs;
1789 env->segs[R_CS].base = (new_cs << 4);
1792 /* protected mode call */
1793 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1794 int shift, target_ulong next_eip)
1796 int new_stack, i;
1797 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1798 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1799 uint32_t val, limit, old_sp_mask;
1800 target_ulong ssp, old_ssp;
1802 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1803 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1804 if ((new_cs & 0xfffc) == 0) {
1805 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1807 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1808 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1810 cpl = env->hflags & HF_CPL_MASK;
1811 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1812 if (e2 & DESC_S_MASK) {
1813 if (!(e2 & DESC_CS_MASK)) {
1814 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1816 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1817 if (e2 & DESC_C_MASK) {
1818 /* conforming code segment */
1819 if (dpl > cpl) {
1820 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1822 } else {
1823 /* non conforming code segment */
1824 rpl = new_cs & 3;
1825 if (rpl > cpl) {
1826 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1828 if (dpl != cpl) {
1829 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1832 if (!(e2 & DESC_P_MASK)) {
1833 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1836 #ifdef TARGET_X86_64
1837 /* XXX: check 16/32 bit cases in long mode */
1838 if (shift == 2) {
1839 target_ulong rsp;
1841 /* 64 bit case */
1842 rsp = env->regs[R_ESP];
1843 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1844 PUSHQ_RA(rsp, next_eip, GETPC());
1845 /* from this point, not restartable */
1846 env->regs[R_ESP] = rsp;
1847 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1848 get_seg_base(e1, e2),
1849 get_seg_limit(e1, e2), e2);
1850 env->eip = new_eip;
1851 } else
1852 #endif
1854 sp = env->regs[R_ESP];
1855 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1856 ssp = env->segs[R_SS].base;
1857 if (shift) {
1858 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1859 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1860 } else {
1861 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1862 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1865 limit = get_seg_limit(e1, e2);
1866 if (new_eip > limit) {
1867 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1869 /* from this point, not restartable */
1870 SET_ESP(sp, sp_mask);
1871 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1872 get_seg_base(e1, e2), limit, e2);
1873 env->eip = new_eip;
1875 } else {
1876 /* check gate type */
1877 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1878 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1879 rpl = new_cs & 3;
1880 switch (type) {
1881 case 1: /* available 286 TSS */
1882 case 9: /* available 386 TSS */
1883 case 5: /* task gate */
1884 if (dpl < cpl || dpl < rpl) {
1885 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1887 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1888 return;
1889 case 4: /* 286 call gate */
1890 case 12: /* 386 call gate */
1891 break;
1892 default:
1893 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1894 break;
1896 shift = type >> 3;
1898 if (dpl < cpl || dpl < rpl) {
1899 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1901 /* check valid bit */
1902 if (!(e2 & DESC_P_MASK)) {
1903 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1905 selector = e1 >> 16;
1906 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1907 param_count = e2 & 0x1f;
1908 if ((selector & 0xfffc) == 0) {
1909 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1912 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1913 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1915 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1916 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1918 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1919 if (dpl > cpl) {
1920 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1922 if (!(e2 & DESC_P_MASK)) {
1923 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1926 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1927 /* to inner privilege */
1928 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1929 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1930 TARGET_FMT_lx "\n", ss, sp, param_count,
1931 env->regs[R_ESP]);
1932 if ((ss & 0xfffc) == 0) {
1933 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1935 if ((ss & 3) != dpl) {
1936 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1938 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1939 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1941 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1942 if (ss_dpl != dpl) {
1943 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1945 if (!(ss_e2 & DESC_S_MASK) ||
1946 (ss_e2 & DESC_CS_MASK) ||
1947 !(ss_e2 & DESC_W_MASK)) {
1948 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1950 if (!(ss_e2 & DESC_P_MASK)) {
1951 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1954 /* push_size = ((param_count * 2) + 8) << shift; */
1956 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1957 old_ssp = env->segs[R_SS].base;
1959 sp_mask = get_sp_mask(ss_e2);
1960 ssp = get_seg_base(ss_e1, ss_e2);
1961 if (shift) {
1962 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1963 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1964 for (i = param_count - 1; i >= 0; i--) {
1965 val = cpu_ldl_kernel_ra(env, old_ssp +
1966 ((env->regs[R_ESP] + i * 4) &
1967 old_sp_mask), GETPC());
1968 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1970 } else {
1971 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1972 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1973 for (i = param_count - 1; i >= 0; i--) {
1974 val = cpu_lduw_kernel_ra(env, old_ssp +
1975 ((env->regs[R_ESP] + i * 2) &
1976 old_sp_mask), GETPC());
1977 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1980 new_stack = 1;
1981 } else {
1982 /* to same privilege */
1983 sp = env->regs[R_ESP];
1984 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1985 ssp = env->segs[R_SS].base;
1986 /* push_size = (4 << shift); */
1987 new_stack = 0;
1990 if (shift) {
1991 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1992 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1993 } else {
1994 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1995 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1998 /* from this point, not restartable */
2000 if (new_stack) {
2001 ss = (ss & ~3) | dpl;
2002 cpu_x86_load_seg_cache(env, R_SS, ss,
2003 ssp,
2004 get_seg_limit(ss_e1, ss_e2),
2005 ss_e2);
2008 selector = (selector & ~3) | dpl;
2009 cpu_x86_load_seg_cache(env, R_CS, selector,
2010 get_seg_base(e1, e2),
2011 get_seg_limit(e1, e2),
2012 e2);
2013 SET_ESP(sp, sp_mask);
2014 env->eip = offset;
2018 /* real and vm86 mode iret */
2019 void helper_iret_real(CPUX86State *env, int shift)
2021 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2022 target_ulong ssp;
2023 int eflags_mask;
2025 sp_mask = 0xffff; /* XXXX: use SS segment size? */
2026 sp = env->regs[R_ESP];
2027 ssp = env->segs[R_SS].base;
2028 if (shift == 1) {
2029 /* 32 bits */
2030 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2031 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2032 new_cs &= 0xffff;
2033 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2034 } else {
2035 /* 16 bits */
2036 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2037 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2038 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2040 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2041 env->segs[R_CS].selector = new_cs;
2042 env->segs[R_CS].base = (new_cs << 4);
2043 env->eip = new_eip;
2044 if (env->eflags & VM_MASK) {
2045 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2046 NT_MASK;
2047 } else {
2048 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2049 RF_MASK | NT_MASK;
2051 if (shift == 0) {
2052 eflags_mask &= 0xffff;
2054 cpu_load_eflags(env, new_eflags, eflags_mask);
2055 env->hflags2 &= ~HF2_NMI_MASK;
2058 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2060 int dpl;
2061 uint32_t e2;
2063 /* XXX: on x86_64, we do not want to nullify FS and GS because
2064 they may still contain a valid base. I would be interested to
2065 know how a real x86_64 CPU behaves */
2066 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2067 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2068 return;
2071 e2 = env->segs[seg_reg].flags;
2072 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2073 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2074 /* data or non conforming code segment */
2075 if (dpl < cpl) {
2076 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2081 /* protected mode iret */
2082 static inline void helper_ret_protected(CPUX86State *env, int shift,
2083 int is_iret, int addend,
2084 uintptr_t retaddr)
2086 uint32_t new_cs, new_eflags, new_ss;
2087 uint32_t new_es, new_ds, new_fs, new_gs;
2088 uint32_t e1, e2, ss_e1, ss_e2;
2089 int cpl, dpl, rpl, eflags_mask, iopl;
2090 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2092 #ifdef TARGET_X86_64
2093 if (shift == 2) {
2094 sp_mask = -1;
2095 } else
2096 #endif
2098 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2100 sp = env->regs[R_ESP];
2101 ssp = env->segs[R_SS].base;
2102 new_eflags = 0; /* avoid warning */
2103 #ifdef TARGET_X86_64
2104 if (shift == 2) {
2105 POPQ_RA(sp, new_eip, retaddr);
2106 POPQ_RA(sp, new_cs, retaddr);
2107 new_cs &= 0xffff;
2108 if (is_iret) {
2109 POPQ_RA(sp, new_eflags, retaddr);
2111 } else
2112 #endif
2114 if (shift == 1) {
2115 /* 32 bits */
2116 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2117 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2118 new_cs &= 0xffff;
2119 if (is_iret) {
2120 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2121 if (new_eflags & VM_MASK) {
2122 goto return_to_vm86;
2125 } else {
2126 /* 16 bits */
2127 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2128 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2129 if (is_iret) {
2130 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2134 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2135 new_cs, new_eip, shift, addend);
2136 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2137 if ((new_cs & 0xfffc) == 0) {
2138 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2140 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2141 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2143 if (!(e2 & DESC_S_MASK) ||
2144 !(e2 & DESC_CS_MASK)) {
2145 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2147 cpl = env->hflags & HF_CPL_MASK;
2148 rpl = new_cs & 3;
2149 if (rpl < cpl) {
2150 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2152 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2153 if (e2 & DESC_C_MASK) {
2154 if (dpl > rpl) {
2155 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2157 } else {
2158 if (dpl != rpl) {
2159 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2162 if (!(e2 & DESC_P_MASK)) {
2163 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2166 sp += addend;
2167 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2168 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2169 /* return to same privilege level */
2170 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2171 get_seg_base(e1, e2),
2172 get_seg_limit(e1, e2),
2173 e2);
2174 } else {
2175 /* return to different privilege level */
2176 #ifdef TARGET_X86_64
2177 if (shift == 2) {
2178 POPQ_RA(sp, new_esp, retaddr);
2179 POPQ_RA(sp, new_ss, retaddr);
2180 new_ss &= 0xffff;
2181 } else
2182 #endif
2184 if (shift == 1) {
2185 /* 32 bits */
2186 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2187 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2188 new_ss &= 0xffff;
2189 } else {
2190 /* 16 bits */
2191 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2192 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2195 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2196 new_ss, new_esp);
2197 if ((new_ss & 0xfffc) == 0) {
2198 #ifdef TARGET_X86_64
2199 /* NULL ss is allowed in long mode if cpl != 3 */
2200 /* XXX: test CS64? */
2201 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2202 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2203 0, 0xffffffff,
2204 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2205 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2206 DESC_W_MASK | DESC_A_MASK);
2207 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2208 } else
2209 #endif
2211 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2213 } else {
2214 if ((new_ss & 3) != rpl) {
2215 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2217 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2218 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2220 if (!(ss_e2 & DESC_S_MASK) ||
2221 (ss_e2 & DESC_CS_MASK) ||
2222 !(ss_e2 & DESC_W_MASK)) {
2223 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2225 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2226 if (dpl != rpl) {
2227 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2229 if (!(ss_e2 & DESC_P_MASK)) {
2230 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2232 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2233 get_seg_base(ss_e1, ss_e2),
2234 get_seg_limit(ss_e1, ss_e2),
2235 ss_e2);
2238 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2239 get_seg_base(e1, e2),
2240 get_seg_limit(e1, e2),
2241 e2);
2242 sp = new_esp;
2243 #ifdef TARGET_X86_64
2244 if (env->hflags & HF_CS64_MASK) {
2245 sp_mask = -1;
2246 } else
2247 #endif
2249 sp_mask = get_sp_mask(ss_e2);
2252 /* validate data segments */
2253 validate_seg(env, R_ES, rpl);
2254 validate_seg(env, R_DS, rpl);
2255 validate_seg(env, R_FS, rpl);
2256 validate_seg(env, R_GS, rpl);
2258 sp += addend;
2260 SET_ESP(sp, sp_mask);
2261 env->eip = new_eip;
2262 if (is_iret) {
2263 /* NOTE: 'cpl' is the _old_ CPL */
2264 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2265 if (cpl == 0) {
2266 eflags_mask |= IOPL_MASK;
2268 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2269 if (cpl <= iopl) {
2270 eflags_mask |= IF_MASK;
2272 if (shift == 0) {
2273 eflags_mask &= 0xffff;
2275 cpu_load_eflags(env, new_eflags, eflags_mask);
2277 return;
2279 return_to_vm86:
2280 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2281 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2282 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2283 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2284 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2285 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2287 /* modify processor state */
2288 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2289 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2290 VIP_MASK);
2291 load_seg_vm(env, R_CS, new_cs & 0xffff);
2292 load_seg_vm(env, R_SS, new_ss & 0xffff);
2293 load_seg_vm(env, R_ES, new_es & 0xffff);
2294 load_seg_vm(env, R_DS, new_ds & 0xffff);
2295 load_seg_vm(env, R_FS, new_fs & 0xffff);
2296 load_seg_vm(env, R_GS, new_gs & 0xffff);
2298 env->eip = new_eip & 0xffff;
2299 env->regs[R_ESP] = new_esp;
2302 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2304 int tss_selector, type;
2305 uint32_t e1, e2;
2307 /* specific case for TSS */
2308 if (env->eflags & NT_MASK) {
2309 #ifdef TARGET_X86_64
2310 if (env->hflags & HF_LMA_MASK) {
2311 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2313 #endif
2314 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2315 if (tss_selector & 4) {
2316 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2318 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2319 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2321 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2322 /* NOTE: we check both segment and busy TSS */
2323 if (type != 3) {
2324 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2326 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2327 } else {
2328 helper_ret_protected(env, shift, 1, 0, GETPC());
2330 env->hflags2 &= ~HF2_NMI_MASK;
2333 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2335 helper_ret_protected(env, shift, 0, addend, GETPC());
2338 void helper_sysenter(CPUX86State *env)
2340 if (env->sysenter_cs == 0) {
2341 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2343 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2345 #ifdef TARGET_X86_64
2346 if (env->hflags & HF_LMA_MASK) {
2347 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2348 0, 0xffffffff,
2349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2350 DESC_S_MASK |
2351 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2352 DESC_L_MASK);
2353 } else
2354 #endif
2356 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2357 0, 0xffffffff,
2358 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2359 DESC_S_MASK |
2360 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2362 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2363 0, 0xffffffff,
2364 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2365 DESC_S_MASK |
2366 DESC_W_MASK | DESC_A_MASK);
2367 env->regs[R_ESP] = env->sysenter_esp;
2368 env->eip = env->sysenter_eip;
2371 void helper_sysexit(CPUX86State *env, int dflag)
2373 int cpl;
2375 cpl = env->hflags & HF_CPL_MASK;
2376 if (env->sysenter_cs == 0 || cpl != 0) {
2377 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2379 #ifdef TARGET_X86_64
2380 if (dflag == 2) {
2381 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2382 3, 0, 0xffffffff,
2383 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2384 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2385 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2386 DESC_L_MASK);
2387 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2388 3, 0, 0xffffffff,
2389 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2390 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2391 DESC_W_MASK | DESC_A_MASK);
2392 } else
2393 #endif
2395 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2396 3, 0, 0xffffffff,
2397 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2398 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2399 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2400 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2401 3, 0, 0xffffffff,
2402 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2403 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2404 DESC_W_MASK | DESC_A_MASK);
2406 env->regs[R_ESP] = env->regs[R_ECX];
2407 env->eip = env->regs[R_EDX];
2410 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2412 unsigned int limit;
2413 uint32_t e1, e2, eflags, selector;
2414 int rpl, dpl, cpl, type;
2416 selector = selector1 & 0xffff;
2417 eflags = cpu_cc_compute_all(env, CC_OP);
2418 if ((selector & 0xfffc) == 0) {
2419 goto fail;
2421 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2422 goto fail;
2424 rpl = selector & 3;
2425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426 cpl = env->hflags & HF_CPL_MASK;
2427 if (e2 & DESC_S_MASK) {
2428 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2429 /* conforming */
2430 } else {
2431 if (dpl < cpl || dpl < rpl) {
2432 goto fail;
2435 } else {
2436 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2437 switch (type) {
2438 case 1:
2439 case 2:
2440 case 3:
2441 case 9:
2442 case 11:
2443 break;
2444 default:
2445 goto fail;
2447 if (dpl < cpl || dpl < rpl) {
2448 fail:
2449 CC_SRC = eflags & ~CC_Z;
2450 return 0;
2453 limit = get_seg_limit(e1, e2);
2454 CC_SRC = eflags | CC_Z;
2455 return limit;
2458 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2460 uint32_t e1, e2, eflags, selector;
2461 int rpl, dpl, cpl, type;
2463 selector = selector1 & 0xffff;
2464 eflags = cpu_cc_compute_all(env, CC_OP);
2465 if ((selector & 0xfffc) == 0) {
2466 goto fail;
2468 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2469 goto fail;
2471 rpl = selector & 3;
2472 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2473 cpl = env->hflags & HF_CPL_MASK;
2474 if (e2 & DESC_S_MASK) {
2475 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2476 /* conforming */
2477 } else {
2478 if (dpl < cpl || dpl < rpl) {
2479 goto fail;
2482 } else {
2483 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2484 switch (type) {
2485 case 1:
2486 case 2:
2487 case 3:
2488 case 4:
2489 case 5:
2490 case 9:
2491 case 11:
2492 case 12:
2493 break;
2494 default:
2495 goto fail;
2497 if (dpl < cpl || dpl < rpl) {
2498 fail:
2499 CC_SRC = eflags & ~CC_Z;
2500 return 0;
2503 CC_SRC = eflags | CC_Z;
2504 return e2 & 0x00f0ff00;
2507 void helper_verr(CPUX86State *env, target_ulong selector1)
2509 uint32_t e1, e2, eflags, selector;
2510 int rpl, dpl, cpl;
2512 selector = selector1 & 0xffff;
2513 eflags = cpu_cc_compute_all(env, CC_OP);
2514 if ((selector & 0xfffc) == 0) {
2515 goto fail;
2517 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2518 goto fail;
2520 if (!(e2 & DESC_S_MASK)) {
2521 goto fail;
2523 rpl = selector & 3;
2524 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2525 cpl = env->hflags & HF_CPL_MASK;
2526 if (e2 & DESC_CS_MASK) {
2527 if (!(e2 & DESC_R_MASK)) {
2528 goto fail;
2530 if (!(e2 & DESC_C_MASK)) {
2531 if (dpl < cpl || dpl < rpl) {
2532 goto fail;
2535 } else {
2536 if (dpl < cpl || dpl < rpl) {
2537 fail:
2538 CC_SRC = eflags & ~CC_Z;
2539 return;
2542 CC_SRC = eflags | CC_Z;
2545 void helper_verw(CPUX86State *env, target_ulong selector1)
2547 uint32_t e1, e2, eflags, selector;
2548 int rpl, dpl, cpl;
2550 selector = selector1 & 0xffff;
2551 eflags = cpu_cc_compute_all(env, CC_OP);
2552 if ((selector & 0xfffc) == 0) {
2553 goto fail;
2555 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2556 goto fail;
2558 if (!(e2 & DESC_S_MASK)) {
2559 goto fail;
2561 rpl = selector & 3;
2562 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2563 cpl = env->hflags & HF_CPL_MASK;
2564 if (e2 & DESC_CS_MASK) {
2565 goto fail;
2566 } else {
2567 if (dpl < cpl || dpl < rpl) {
2568 goto fail;
2570 if (!(e2 & DESC_W_MASK)) {
2571 fail:
2572 CC_SRC = eflags & ~CC_Z;
2573 return;
2576 CC_SRC = eflags | CC_Z;
2579 #if defined(CONFIG_USER_ONLY)
2580 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2582 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2583 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2584 selector &= 0xffff;
2585 cpu_x86_load_seg_cache(env, seg_reg, selector,
2586 (selector << 4), 0xffff,
2587 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2588 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2589 } else {
2590 helper_load_seg(env, seg_reg, selector);
2593 #endif
2595 /* check if Port I/O is allowed in TSS */
2596 static inline void check_io(CPUX86State *env, int addr, int size,
2597 uintptr_t retaddr)
2599 int io_offset, val, mask;
2601 /* TSS must be a valid 32 bit one */
2602 if (!(env->tr.flags & DESC_P_MASK) ||
2603 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2604 env->tr.limit < 103) {
2605 goto fail;
2607 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2608 io_offset += (addr >> 3);
2609 /* Note: the check needs two bytes */
2610 if ((io_offset + 1) > env->tr.limit) {
2611 goto fail;
2613 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2614 val >>= (addr & 7);
2615 mask = (1 << size) - 1;
2616 /* all bits must be zero to allow the I/O */
2617 if ((val & mask) != 0) {
2618 fail:
2619 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2623 void helper_check_iob(CPUX86State *env, uint32_t t0)
2625 check_io(env, t0, 1, GETPC());
2628 void helper_check_iow(CPUX86State *env, uint32_t t0)
2630 check_io(env, t0, 2, GETPC());
2633 void helper_check_iol(CPUX86State *env, uint32_t t0)
2635 check_io(env, t0, 4, GETPC());