target/i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/log.h"
29 //#define DEBUG_PCALL
31 #ifdef DEBUG_PCALL
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu) \
34 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 #else
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
38 #endif
40 /*
41 * TODO: Convert callers to compute cpu_mmu_index_kernel once
42 * and use *_mmuidx_ra directly.
43 */
44 #define cpu_ldub_kernel_ra(e, p, r) \
45 cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
46 #define cpu_lduw_kernel_ra(e, p, r) \
47 cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
48 #define cpu_ldl_kernel_ra(e, p, r) \
49 cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
50 #define cpu_ldq_kernel_ra(e, p, r) \
51 cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
53 #define cpu_stb_kernel_ra(e, p, v, r) \
54 cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
55 #define cpu_stw_kernel_ra(e, p, v, r) \
56 cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
57 #define cpu_stl_kernel_ra(e, p, v, r) \
58 cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
59 #define cpu_stq_kernel_ra(e, p, v, r) \
60 cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
62 #define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
63 #define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
64 #define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
65 #define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
67 #define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
68 #define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
69 #define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
70 #define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
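/*
 * These wrappers perform guest memory accesses with kernel (CPL 0)
 * privileges, as needed for reading descriptor tables and TSS data.
 * The _ra variants take a host return address (usually GETPC() from the
 * top-level helper) so that a faulting access can be unwound back to the
 * guest instruction; the non-_ra forms pass 0, meaning "no unwinding".
 */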
72 /* return non-zero on error */
73 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
74 uint32_t *e2_ptr, int selector,
75 uintptr_t retaddr)
77 SegmentCache *dt;
78 int index;
79 target_ulong ptr;
81 if (selector & 0x4) {
82 dt = &env->ldt;
83 } else {
84 dt = &env->gdt;
86 index = selector & ~7;
87 if ((index + 7) > dt->limit) {
88 return -1;
90 ptr = dt->base + index;
91 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
92 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
93 return 0;
96 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
97 uint32_t *e2_ptr, int selector)
99 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
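/*
 * A segment descriptor is handled as two 32-bit words e1/e2:
 *   e1[15:0]  = limit[15:0]     e1[31:16] = base[15:0]
 *   e2[7:0]   = base[23:16]     e2[19:16] = limit[19:16]
 *   e2[31:24] = base[31:24]     the other e2 bits hold type/S/DPL/P/AVL/L/D/G
 * get_seg_limit() and get_seg_base() below decode these fields.
 */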
102 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
104 unsigned int limit;
106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
107 if (e2 & DESC_G_MASK) {
108 limit = (limit << 12) | 0xfff;
110 return limit;
113 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
115 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
118 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
119 uint32_t e2)
121 sc->base = get_seg_base(e1, e2);
122 sc->limit = get_seg_limit(e1, e2);
123 sc->flags = e2;
126 /* init the segment cache in vm86 mode. */
127 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
129 selector &= 0xffff;
131 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
132 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
133 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
136 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
137 uint32_t *esp_ptr, int dpl,
138 uintptr_t retaddr)
140 X86CPU *cpu = env_archcpu(env);
141 int type, index, shift;
143 #if 0
145 int i;
146 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
147 for (i = 0; i < env->tr.limit; i++) {
148 printf("%02x ", env->tr.base[i]);
149 if ((i & 7) == 7) {
150 printf("\n");
153 printf("\n");
155 #endif
157 if (!(env->tr.flags & DESC_P_MASK)) {
158 cpu_abort(CPU(cpu), "invalid tss");
160 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
161 if ((type & 7) != 1) {
162 cpu_abort(CPU(cpu), "invalid tss type");
164 shift = type >> 3;
165 index = (dpl * 4 + 2) << shift;
166 if (index + (4 << shift) - 1 > env->tr.limit) {
167 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
169 if (shift == 0) {
170 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
172 } else {
173 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
174 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
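/*
 * index = (dpl * 4 + 2) << shift selects the inner-level stack pointer slot:
 * in a 32-bit TSS, ESPn is at offset 4 + 8 * n and SSn at 8 + 8 * n;
 * in a 16-bit TSS, SPn is at offset 2 + 4 * n and SSn at 4 + 4 * n.
 */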
178 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
179 uintptr_t retaddr)
181 uint32_t e1, e2;
182 int rpl, dpl;
184 if ((selector & 0xfffc) != 0) {
185 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 if (!(e2 & DESC_S_MASK)) {
189 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
191 rpl = selector & 3;
192 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
193 if (seg_reg == R_CS) {
194 if (!(e2 & DESC_CS_MASK)) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 if (dpl != rpl) {
198 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
200 } else if (seg_reg == R_SS) {
201 /* SS must be writable data */
202 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 if (dpl != cpl || dpl != rpl) {
206 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
208 } else {
209 /* code segment must be readable */
210 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
211 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
213 /* if data or non-conforming code, check the access rights */
214 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
215 if (dpl < cpl || dpl < rpl) {
216 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
220 if (!(e2 & DESC_P_MASK)) {
221 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
223 cpu_x86_load_seg_cache(env, seg_reg, selector,
224 get_seg_base(e1, e2),
225 get_seg_limit(e1, e2),
226 e2);
227 } else {
228 if (seg_reg == R_SS || seg_reg == R_CS) {
229 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
234 #define SWITCH_TSS_JMP 0
235 #define SWITCH_TSS_IRET 1
236 #define SWITCH_TSS_CALL 2
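/*
 * switch_tss_ra() below handles the three ways a task switch can be
 * triggered: a far JMP or CALL through a TSS/task-gate descriptor, or an
 * IRET with EFLAGS.NT set.  JMP and IRET clear the busy bit of the old
 * TSS while CALL leaves it set; CALL additionally stores a back link to
 * the old TSS and sets NT in the new task's EFLAGS, so that a later IRET
 * can return to the calling task.
 */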
238 /* XXX: restore CPU state in registers (PowerPC case) */
239 static void switch_tss_ra(CPUX86State *env, int tss_selector,
240 uint32_t e1, uint32_t e2, int source,
241 uint32_t next_eip, uintptr_t retaddr)
243 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
244 target_ulong tss_base;
245 uint32_t new_regs[8], new_segs[6];
246 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
247 uint32_t old_eflags, eflags_mask;
248 SegmentCache *dt;
249 int index;
250 target_ulong ptr;
252 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
253 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
254 source);
256 /* if it is a task gate, read and load the referenced TSS segment */
257 if (type == 5) {
258 if (!(e2 & DESC_P_MASK)) {
259 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
261 tss_selector = e1 >> 16;
262 if (tss_selector & 4) {
263 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
265 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 if (e2 & DESC_S_MASK) {
269 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
271 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
272 if ((type & 7) != 1) {
273 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
277 if (!(e2 & DESC_P_MASK)) {
278 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
281 if (type & 8) {
282 tss_limit_max = 103;
283 } else {
284 tss_limit_max = 43;
286 tss_limit = get_seg_limit(e1, e2);
287 tss_base = get_seg_base(e1, e2);
288 if ((tss_selector & 4) != 0 ||
289 tss_limit < tss_limit_max) {
290 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
292 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
293 if (old_type & 8) {
294 old_tss_limit_max = 103;
295 } else {
296 old_tss_limit_max = 43;
299 /* read all the registers from the new TSS */
300 if (type & 8) {
301 /* 32 bit */
302 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
303 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
304 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
305 for (i = 0; i < 8; i++) {
306 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
307 retaddr);
309 for (i = 0; i < 6; i++) {
310 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
311 retaddr);
313 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
314 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
315 } else {
316 /* 16 bit */
317 new_cr3 = 0;
318 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
319 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
320 for (i = 0; i < 8; i++) {
321 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
322 retaddr) | 0xffff0000;
324 for (i = 0; i < 4; i++) {
325 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
326 retaddr);
328 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
329 new_segs[R_FS] = 0;
330 new_segs[R_GS] = 0;
331 new_trap = 0;
333 /* XXX: avoid a compiler warning, see
334 http://support.amd.com/us/Processor_TechDocs/24593.pdf
335 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
336 (void)new_trap;
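/*
 * The offsets used above follow the architectural TSS layouts: the 32-bit
 * TSS keeps CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the general registers
 * at 0x28, the segment selectors at 0x48, the LDT selector at 0x60 and the
 * T-bit/I/O-map word at 0x64; the 16-bit TSS keeps IP at 0x0e, FLAGS at
 * 0x10, the registers at 0x12, the selectors at 0x22 and the LDT selector
 * at 0x2a.
 */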
338 /* NOTE: we must avoid memory exceptions during the task switch,
339 so we make dummy accesses beforehand */
340 /* XXX: this can still fail in some cases, so a bigger hack would be
341 necessary to validate the TLB after the accesses have been done */
343 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
344 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
345 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
346 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
348 /* clear busy bit (it is restartable) */
349 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
350 target_ulong ptr;
351 uint32_t e2;
353 ptr = env->gdt.base + (env->tr.selector & ~7);
354 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
355 e2 &= ~DESC_TSS_BUSY_MASK;
356 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
358 old_eflags = cpu_compute_eflags(env);
359 if (source == SWITCH_TSS_IRET) {
360 old_eflags &= ~NT_MASK;
363 /* save the current state in the old TSS */
364 if (type & 8) {
365 /* 32 bit */
366 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
374 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
375 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
376 for (i = 0; i < 6; i++) {
377 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
378 env->segs[i].selector, retaddr);
380 } else {
381 /* 16 bit */
382 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
390 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
391 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
392 for (i = 0; i < 4; i++) {
393 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
394 env->segs[i].selector, retaddr);
398 /* from now on, if an exception occurs, it will occur in the
399 context of the next task */
401 if (source == SWITCH_TSS_CALL) {
402 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
403 new_eflags |= NT_MASK;
406 /* set busy bit */
407 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
408 target_ulong ptr;
409 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
413 e2 |= DESC_TSS_BUSY_MASK;
414 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
417 /* set the new CPU state */
418 /* from this point on, any exception that occurs can cause problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
430 /* load all the registers that cannot fault first, then reload the
431 segment registers, which may raise exceptions */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8)) {
436 eflags_mask &= 0xffff;
438 cpu_load_eflags(env, new_eflags, eflags_mask);
439 /* XXX: what to do in 16 bit case? */
440 env->regs[R_EAX] = new_regs[0];
441 env->regs[R_ECX] = new_regs[1];
442 env->regs[R_EDX] = new_regs[2];
443 env->regs[R_EBX] = new_regs[3];
444 env->regs[R_ESP] = new_regs[4];
445 env->regs[R_EBP] = new_regs[5];
446 env->regs[R_ESI] = new_regs[6];
447 env->regs[R_EDI] = new_regs[7];
448 if (new_eflags & VM_MASK) {
449 for (i = 0; i < 6; i++) {
450 load_seg_vm(env, i, new_segs[i]);
452 } else {
453 /* first just selectors as the rest may trigger exceptions */
454 for (i = 0; i < 6; i++) {
455 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
459 env->ldt.selector = new_ldt & ~4;
460 env->ldt.base = 0;
461 env->ldt.limit = 0;
462 env->ldt.flags = 0;
464 /* load the LDT */
465 if (new_ldt & 4) {
466 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit) {
473 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
475 ptr = dt->base + index;
476 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
477 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
478 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 if (!(e2 & DESC_P_MASK)) {
482 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
484 load_seg_cache_raw_dt(&env->ldt, e1, e2);
487 /* load the segments */
488 if (!(new_eflags & VM_MASK)) {
489 int cpl = new_segs[R_CS] & 3;
490 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
491 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
492 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
493 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
494 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
495 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
498 /* check that env->eip is in the CS segment limits */
499 if (new_eip > env->segs[R_CS].limit) {
500 /* XXX: different exception if CALL? */
501 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
504 #ifndef CONFIG_USER_ONLY
505 /* reset local breakpoints */
506 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
507 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
509 #endif
512 static void switch_tss(CPUX86State *env, int tss_selector,
513 uint32_t e1, uint32_t e2, int source,
514 uint32_t next_eip)
516 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
519 static inline unsigned int get_sp_mask(unsigned int e2)
521 #ifdef TARGET_X86_64
522 if (e2 & DESC_L_MASK) {
523 return 0;
524 } else
525 #endif
526 if (e2 & DESC_B_MASK) {
527 return 0xffffffff;
528 } else {
529 return 0xffff;
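/*
 * The stack-pointer mask is derived from the SS descriptor flags: a
 * long-mode (L bit) stack uses the full 64-bit RSP (mask 0), a big
 * (B bit) stack uses 32-bit ESP, and otherwise only 16-bit SP wraps.
 */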
533 static int exception_has_error_code(int intno)
535 switch (intno) {
536 case 8:
537 case 10:
538 case 11:
539 case 12:
540 case 13:
541 case 14:
542 case 17:
543 return 1;
545 return 0;
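/*
 * The vectors above are the exceptions that push an error code:
 * #DF (8), #TS (10), #NP (11), #SS (12), #GP (13), #PF (14) and #AC (17).
 */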
548 #ifdef TARGET_X86_64
549 #define SET_ESP(val, sp_mask) \
550 do { \
551 if ((sp_mask) == 0xffff) { \
552 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
553 ((val) & 0xffff); \
554 } else if ((sp_mask) == 0xffffffffLL) { \
555 env->regs[R_ESP] = (uint32_t)(val); \
556 } else { \
557 env->regs[R_ESP] = (val); \
559 } while (0)
560 #else
561 #define SET_ESP(val, sp_mask) \
562 do { \
563 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
564 ((val) & (sp_mask)); \
565 } while (0)
566 #endif
568 /* On 64-bit targets this addition can overflow 32 bits, so this segment
569 * addition macro is used to truncate the value to 32 bits whenever needed */
570 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
572 /* XXX: add an is_user flag to have proper security support */
573 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
575 sp -= 2; \
576 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
579 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
581 sp -= 4; \
582 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
585 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
587 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
588 sp += 2; \
591 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
593 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
594 sp += 4; \
597 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
598 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
599 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
600 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
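/*
 * The PUSH/POP macros operate on an (ssp, sp, sp_mask) triple: ssp is the
 * SS segment base, sp is a local copy of the stack pointer updated in
 * place, and sp_mask (from get_sp_mask()) makes the pointer wrap at 16 or
 * 32 bits.  Callers commit the new value with SET_ESP() only once all of
 * the pushes have succeeded.
 */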
602 /* protected mode interrupt */
603 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
604 int error_code, unsigned int next_eip,
605 int is_hw)
607 SegmentCache *dt;
608 target_ulong ptr, ssp;
609 int type, dpl, selector, ss_dpl, cpl;
610 int has_error_code, new_stack, shift;
611 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
612 uint32_t old_eip, sp_mask;
613 int vm86 = env->eflags & VM_MASK;
615 has_error_code = 0;
616 if (!is_int && !is_hw) {
617 has_error_code = exception_has_error_code(intno);
619 if (is_int) {
620 old_eip = next_eip;
621 } else {
622 old_eip = env->eip;
625 dt = &env->idt;
626 if (intno * 8 + 7 > dt->limit) {
627 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
629 ptr = dt->base + intno * 8;
630 e1 = cpu_ldl_kernel(env, ptr);
631 e2 = cpu_ldl_kernel(env, ptr + 4);
632 /* check gate type */
633 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
634 switch (type) {
635 case 5: /* task gate */
636 /* must do that check here to return the correct error code */
637 if (!(e2 & DESC_P_MASK)) {
638 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
640 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
641 if (has_error_code) {
642 int type;
643 uint32_t mask;
645 /* push the error code */
646 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
647 shift = type >> 3;
648 if (env->segs[R_SS].flags & DESC_B_MASK) {
649 mask = 0xffffffff;
650 } else {
651 mask = 0xffff;
653 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
654 ssp = env->segs[R_SS].base + esp;
655 if (shift) {
656 cpu_stl_kernel(env, ssp, error_code);
657 } else {
658 cpu_stw_kernel(env, ssp, error_code);
660 SET_ESP(esp, mask);
662 return;
663 case 6: /* 286 interrupt gate */
664 case 7: /* 286 trap gate */
665 case 14: /* 386 interrupt gate */
666 case 15: /* 386 trap gate */
667 break;
668 default:
669 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
670 break;
672 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
673 cpl = env->hflags & HF_CPL_MASK;
674 /* check privilege if software int */
675 if (is_int && dpl < cpl) {
676 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
678 /* check valid bit */
679 if (!(e2 & DESC_P_MASK)) {
680 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
682 selector = e1 >> 16;
683 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
684 if ((selector & 0xfffc) == 0) {
685 raise_exception_err(env, EXCP0D_GPF, 0);
687 if (load_segment(env, &e1, &e2, selector) != 0) {
688 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
690 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
691 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
693 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
694 if (dpl > cpl) {
695 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
697 if (!(e2 & DESC_P_MASK)) {
698 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
700 if (e2 & DESC_C_MASK) {
701 dpl = cpl;
703 if (dpl < cpl) {
704 /* to inner privilege */
705 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
706 if ((ss & 0xfffc) == 0) {
707 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
709 if ((ss & 3) != dpl) {
710 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
712 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
713 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
715 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
716 if (ss_dpl != dpl) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 if (!(ss_e2 & DESC_S_MASK) ||
720 (ss_e2 & DESC_CS_MASK) ||
721 !(ss_e2 & DESC_W_MASK)) {
722 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
724 if (!(ss_e2 & DESC_P_MASK)) {
725 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
727 new_stack = 1;
728 sp_mask = get_sp_mask(ss_e2);
729 ssp = get_seg_base(ss_e1, ss_e2);
730 } else {
731 /* to same privilege */
732 if (vm86) {
733 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
735 new_stack = 0;
736 sp_mask = get_sp_mask(env->segs[R_SS].flags);
737 ssp = env->segs[R_SS].base;
738 esp = env->regs[R_ESP];
741 shift = type >> 3;
743 #if 0
744 /* XXX: check that enough room is available */
745 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
746 if (vm86) {
747 push_size += 8;
749 push_size <<= shift;
750 #endif
751 if (shift == 1) {
752 if (new_stack) {
753 if (vm86) {
754 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
756 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
759 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
760 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
762 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
763 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
764 PUSHL(ssp, esp, sp_mask, old_eip);
765 if (has_error_code) {
766 PUSHL(ssp, esp, sp_mask, error_code);
768 } else {
769 if (new_stack) {
770 if (vm86) {
771 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
773 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
776 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
777 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
779 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
780 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
781 PUSHW(ssp, esp, sp_mask, old_eip);
782 if (has_error_code) {
783 PUSHW(ssp, esp, sp_mask, error_code);
787 /* interrupt gates clear the IF flag */
788 if ((type & 1) == 0) {
789 env->eflags &= ~IF_MASK;
791 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
793 if (new_stack) {
794 if (vm86) {
795 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
797 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
800 ss = (ss & ~3) | dpl;
801 cpu_x86_load_seg_cache(env, R_SS, ss,
802 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
804 SET_ESP(esp, sp_mask);
806 selector = (selector & ~3) | dpl;
807 cpu_x86_load_seg_cache(env, R_CS, selector,
808 get_seg_base(e1, e2),
809 get_seg_limit(e1, e2),
810 e2);
811 env->eip = offset;
814 #ifdef TARGET_X86_64
816 #define PUSHQ_RA(sp, val, ra) \
818 sp -= 8; \
819 cpu_stq_kernel_ra(env, sp, (val), ra); \
822 #define POPQ_RA(sp, val, ra) \
824 val = cpu_ldq_kernel_ra(env, sp, ra); \
825 sp += 8; \
828 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
829 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
831 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
833 X86CPU *cpu = env_archcpu(env);
834 int index;
836 #if 0
837 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
838 env->tr.base, env->tr.limit);
839 #endif
841 if (!(env->tr.flags & DESC_P_MASK)) {
842 cpu_abort(CPU(cpu), "invalid tss");
844 index = 8 * level + 4;
845 if ((index + 7) > env->tr.limit) {
846 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
848 return cpu_ldq_kernel(env, env->tr.base + index);
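/*
 * In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and
 * IST1..IST7 at offsets 36..84, so "8 * level + 4" covers both cases:
 * callers pass either a privilege level (0..2) or ist + 3 (4..10).
 */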
851 /* 64 bit interrupt */
852 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
853 int error_code, target_ulong next_eip, int is_hw)
855 SegmentCache *dt;
856 target_ulong ptr;
857 int type, dpl, selector, cpl, ist;
858 int has_error_code, new_stack;
859 uint32_t e1, e2, e3, ss;
860 target_ulong old_eip, esp, offset;
862 has_error_code = 0;
863 if (!is_int && !is_hw) {
864 has_error_code = exception_has_error_code(intno);
866 if (is_int) {
867 old_eip = next_eip;
868 } else {
869 old_eip = env->eip;
872 dt = &env->idt;
873 if (intno * 16 + 15 > dt->limit) {
874 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
876 ptr = dt->base + intno * 16;
877 e1 = cpu_ldl_kernel(env, ptr);
878 e2 = cpu_ldl_kernel(env, ptr + 4);
879 e3 = cpu_ldl_kernel(env, ptr + 8);
880 /* check gate type */
881 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
882 switch (type) {
883 case 14: /* 386 interrupt gate */
884 case 15: /* 386 trap gate */
885 break;
886 default:
887 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
888 break;
890 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
891 cpl = env->hflags & HF_CPL_MASK;
892 /* check privilege if software int */
893 if (is_int && dpl < cpl) {
894 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
896 /* check valid bit */
897 if (!(e2 & DESC_P_MASK)) {
898 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
900 selector = e1 >> 16;
901 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
902 ist = e2 & 7;
903 if ((selector & 0xfffc) == 0) {
904 raise_exception_err(env, EXCP0D_GPF, 0);
907 if (load_segment(env, &e1, &e2, selector) != 0) {
908 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
910 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
911 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
913 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
914 if (dpl > cpl) {
915 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
917 if (!(e2 & DESC_P_MASK)) {
918 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
920 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
921 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
923 if (e2 & DESC_C_MASK) {
924 dpl = cpl;
926 if (dpl < cpl || ist != 0) {
927 /* to inner privilege */
928 new_stack = 1;
929 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
930 ss = 0;
931 } else {
932 /* to same privilege */
933 if (env->eflags & VM_MASK) {
934 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
936 new_stack = 0;
937 esp = env->regs[R_ESP];
939 esp &= ~0xfLL; /* align stack */
941 PUSHQ(esp, env->segs[R_SS].selector);
942 PUSHQ(esp, env->regs[R_ESP]);
943 PUSHQ(esp, cpu_compute_eflags(env));
944 PUSHQ(esp, env->segs[R_CS].selector);
945 PUSHQ(esp, old_eip);
946 if (has_error_code) {
947 PUSHQ(esp, error_code);
950 /* interrupt gates clear the IF flag */
951 if ((type & 1) == 0) {
952 env->eflags &= ~IF_MASK;
954 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
956 if (new_stack) {
957 ss = 0 | dpl;
958 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
960 env->regs[R_ESP] = esp;
962 selector = (selector & ~3) | dpl;
963 cpu_x86_load_seg_cache(env, R_CS, selector,
964 get_seg_base(e1, e2),
965 get_seg_limit(e1, e2),
966 e2);
967 env->eip = offset;
969 #endif
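/*
 * SYSCALL: the new CS selector comes from STAR[47:32] (SS is CS + 8).  In
 * long mode the target RIP is LSTAR (64-bit code) or CSTAR (compatibility
 * mode) and the RFLAGS bits listed in SFMASK are cleared; outside long
 * mode the target EIP is STAR[31:0] and only IF/VM/RF are cleared.
 */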
971 #ifdef TARGET_X86_64
972 #if defined(CONFIG_USER_ONLY)
973 void helper_syscall(CPUX86State *env, int next_eip_addend)
975 CPUState *cs = env_cpu(env);
977 cs->exception_index = EXCP_SYSCALL;
978 env->exception_next_eip = env->eip + next_eip_addend;
979 cpu_loop_exit(cs);
981 #else
982 void helper_syscall(CPUX86State *env, int next_eip_addend)
984 int selector;
986 if (!(env->efer & MSR_EFER_SCE)) {
987 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
989 selector = (env->star >> 32) & 0xffff;
990 if (env->hflags & HF_LMA_MASK) {
991 int code64;
993 env->regs[R_ECX] = env->eip + next_eip_addend;
994 env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
996 code64 = env->hflags & HF_CS64_MASK;
998 env->eflags &= ~(env->fmask | RF_MASK);
999 cpu_load_eflags(env, env->eflags, 0);
1000 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1001 0, 0xffffffff,
1002 DESC_G_MASK | DESC_P_MASK |
1003 DESC_S_MASK |
1004 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1005 DESC_L_MASK);
1006 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1007 0, 0xffffffff,
1008 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1009 DESC_S_MASK |
1010 DESC_W_MASK | DESC_A_MASK);
1011 if (code64) {
1012 env->eip = env->lstar;
1013 } else {
1014 env->eip = env->cstar;
1016 } else {
1017 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1019 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1020 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1021 0, 0xffffffff,
1022 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1023 DESC_S_MASK |
1024 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1025 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1026 0, 0xffffffff,
1027 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1028 DESC_S_MASK |
1029 DESC_W_MASK | DESC_A_MASK);
1030 env->eip = (uint32_t)env->star;
1033 #endif
1034 #endif
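/*
 * SYSRET: the return selectors are derived from STAR[63:48]: in 64-bit
 * mode CS = STAR[63:48] + 16, in 32-bit mode CS = STAR[63:48], and SS is
 * always STAR[63:48] + 8, all with RPL forced to 3.  In long mode RFLAGS
 * is restored from R11; outside long mode only IF is set.
 */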
1036 #ifdef TARGET_X86_64
1037 void helper_sysret(CPUX86State *env, int dflag)
1039 int cpl, selector;
1041 if (!(env->efer & MSR_EFER_SCE)) {
1042 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1044 cpl = env->hflags & HF_CPL_MASK;
1045 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1046 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1048 selector = (env->star >> 48) & 0xffff;
1049 if (env->hflags & HF_LMA_MASK) {
1050 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1051 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1052 NT_MASK);
1053 if (dflag == 2) {
1054 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1055 0, 0xffffffff,
1056 DESC_G_MASK | DESC_P_MASK |
1057 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1058 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1059 DESC_L_MASK);
1060 env->eip = env->regs[R_ECX];
1061 } else {
1062 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1063 0, 0xffffffff,
1064 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1065 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1066 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1067 env->eip = (uint32_t)env->regs[R_ECX];
1069 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1070 0, 0xffffffff,
1071 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1072 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1073 DESC_W_MASK | DESC_A_MASK);
1074 } else {
1075 env->eflags |= IF_MASK;
1076 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1077 0, 0xffffffff,
1078 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1079 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1080 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1081 env->eip = (uint32_t)env->regs[R_ECX];
1082 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1083 0, 0xffffffff,
1084 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1085 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086 DESC_W_MASK | DESC_A_MASK);
1089 #endif
1091 /* real mode interrupt */
1092 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1093 int error_code, unsigned int next_eip)
1095 SegmentCache *dt;
1096 target_ulong ptr, ssp;
1097 int selector;
1098 uint32_t offset, esp;
1099 uint32_t old_cs, old_eip;
1101 /* real mode (simpler!) */
1102 dt = &env->idt;
1103 if (intno * 4 + 3 > dt->limit) {
1104 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1106 ptr = dt->base + intno * 4;
1107 offset = cpu_lduw_kernel(env, ptr);
1108 selector = cpu_lduw_kernel(env, ptr + 2);
1109 esp = env->regs[R_ESP];
1110 ssp = env->segs[R_SS].base;
1111 if (is_int) {
1112 old_eip = next_eip;
1113 } else {
1114 old_eip = env->eip;
1116 old_cs = env->segs[R_CS].selector;
1117 /* XXX: use SS segment size? */
1118 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1119 PUSHW(ssp, esp, 0xffff, old_cs);
1120 PUSHW(ssp, esp, 0xffff, old_eip);
1122 /* update processor state */
1123 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1124 env->eip = offset;
1125 env->segs[R_CS].selector = selector;
1126 env->segs[R_CS].base = (selector << 4);
1127 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
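/*
 * In real mode, vector n is a 4-byte IVT entry at n * 4 holding the
 * handler's IP (low word) and CS (high word); the old FLAGS, CS and IP
 * are pushed on the 16-bit stack and IF/TF/AC/RF are cleared before
 * jumping to the handler.
 */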
1130 #if defined(CONFIG_USER_ONLY)
1131 /* fake user mode interrupt. is_int is TRUE if coming from the int
1132 * instruction. next_eip is the env->eip value AFTER the interrupt
1133 * instruction. It is only relevant if is_int is TRUE or if intno
1134 * is EXCP_SYSCALL.
1135 */
1136 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1137 int error_code, target_ulong next_eip)
1139 if (is_int) {
1140 SegmentCache *dt;
1141 target_ulong ptr;
1142 int dpl, cpl, shift;
1143 uint32_t e2;
1145 dt = &env->idt;
1146 if (env->hflags & HF_LMA_MASK) {
1147 shift = 4;
1148 } else {
1149 shift = 3;
1151 ptr = dt->base + (intno << shift);
1152 e2 = cpu_ldl_kernel(env, ptr + 4);
1154 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1155 cpl = env->hflags & HF_CPL_MASK;
1156 /* check privilege if software int */
1157 if (dpl < cpl) {
1158 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1162 /* Since we emulate only user space, we cannot do more than exit
1163 the emulation with a suitable exception and error code.
1164 So update EIP for INT 0x80 and EXCP_SYSCALL. */
1165 if (is_int || intno == EXCP_SYSCALL) {
1166 env->eip = next_eip;
1170 #else
1172 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1173 int error_code, int is_hw, int rm)
1175 CPUState *cs = env_cpu(env);
1176 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1177 control.event_inj));
1179 if (!(event_inj & SVM_EVTINJ_VALID)) {
1180 int type;
1182 if (is_int) {
1183 type = SVM_EVTINJ_TYPE_SOFT;
1184 } else {
1185 type = SVM_EVTINJ_TYPE_EXEPT;
1187 event_inj = intno | type | SVM_EVTINJ_VALID;
1188 if (!rm && exception_has_error_code(intno)) {
1189 event_inj |= SVM_EVTINJ_VALID_ERR;
1190 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1191 control.event_inj_err),
1192 error_code);
1194 x86_stl_phys(cs,
1195 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1196 event_inj);
1199 #endif
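/*
 * When running a guest under SVM (HF_GUEST_MASK), handle_even_inj() records
 * the event being delivered in the VMCB event_inj field (together with its
 * error code when applicable); do_interrupt_all() clears the VALID bit
 * again once the event has been delivered inside the guest.
 */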
1201 /*
1202 * Begin execution of an interrupt. is_int is TRUE if coming from
1203 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1204 * instruction. It is only relevant if is_int is TRUE.
1205 */
1206 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1207 int error_code, target_ulong next_eip, int is_hw)
1209 CPUX86State *env = &cpu->env;
1211 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1212 if ((env->cr[0] & CR0_PE_MASK)) {
1213 static int count;
1215 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1216 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1217 count, intno, error_code, is_int,
1218 env->hflags & HF_CPL_MASK,
1219 env->segs[R_CS].selector, env->eip,
1220 (int)env->segs[R_CS].base + env->eip,
1221 env->segs[R_SS].selector, env->regs[R_ESP]);
1222 if (intno == 0x0e) {
1223 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1224 } else {
1225 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1227 qemu_log("\n");
1228 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1229 #if 0
1231 int i;
1232 target_ulong ptr;
1234 qemu_log(" code=");
1235 ptr = env->segs[R_CS].base + env->eip;
1236 for (i = 0; i < 16; i++) {
1237 qemu_log(" %02x", ldub(ptr + i));
1239 qemu_log("\n");
1241 #endif
1242 count++;
1245 if (env->cr[0] & CR0_PE_MASK) {
1246 #if !defined(CONFIG_USER_ONLY)
1247 if (env->hflags & HF_GUEST_MASK) {
1248 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1250 #endif
1251 #ifdef TARGET_X86_64
1252 if (env->hflags & HF_LMA_MASK) {
1253 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1254 } else
1255 #endif
1257 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1258 is_hw);
1260 } else {
1261 #if !defined(CONFIG_USER_ONLY)
1262 if (env->hflags & HF_GUEST_MASK) {
1263 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1265 #endif
1266 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1269 #if !defined(CONFIG_USER_ONLY)
1270 if (env->hflags & HF_GUEST_MASK) {
1271 CPUState *cs = CPU(cpu);
1272 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1273 offsetof(struct vmcb,
1274 control.event_inj));
1276 x86_stl_phys(cs,
1277 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1278 event_inj & ~SVM_EVTINJ_VALID);
1280 #endif
1283 void x86_cpu_do_interrupt(CPUState *cs)
1285 X86CPU *cpu = X86_CPU(cs);
1286 CPUX86State *env = &cpu->env;
1288 #if defined(CONFIG_USER_ONLY)
1289 /* in user-mode emulation, we simulate a fake exception
1290 which will be handled outside the cpu execution
1291 loop */
1292 do_interrupt_user(env, cs->exception_index,
1293 env->exception_is_int,
1294 env->error_code,
1295 env->exception_next_eip);
1296 /* successfully delivered */
1297 env->old_exception = -1;
1298 #else
1299 if (cs->exception_index >= EXCP_VMEXIT) {
1300 assert(env->old_exception == -1);
1301 do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
1302 } else {
1303 do_interrupt_all(cpu, cs->exception_index,
1304 env->exception_is_int,
1305 env->error_code,
1306 env->exception_next_eip, 0);
1307 /* successfully delivered */
1308 env->old_exception = -1;
1310 #endif
1313 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1315 do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
1318 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1320 X86CPU *cpu = X86_CPU(cs);
1321 CPUX86State *env = &cpu->env;
1322 int intno;
1324 interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
1325 if (!interrupt_request) {
1326 return false;
1329 /* Don't process multiple interrupt requests in a single call.
1330 * This is required to make icount-driven execution deterministic.
1331 */
1332 switch (interrupt_request) {
1333 #if !defined(CONFIG_USER_ONLY)
1334 case CPU_INTERRUPT_POLL:
1335 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1336 apic_poll_irq(cpu->apic_state);
1337 break;
1338 #endif
1339 case CPU_INTERRUPT_SIPI:
1340 do_cpu_sipi(cpu);
1341 break;
1342 case CPU_INTERRUPT_SMI:
1343 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1344 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1345 do_smm_enter(cpu);
1346 break;
1347 case CPU_INTERRUPT_NMI:
1348 cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1349 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1350 env->hflags2 |= HF2_NMI_MASK;
1351 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1352 break;
1353 case CPU_INTERRUPT_MCE:
1354 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1355 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1356 break;
1357 case CPU_INTERRUPT_HARD:
1358 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1359 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1360 CPU_INTERRUPT_VIRQ);
1361 intno = cpu_get_pic_interrupt(env);
1362 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1363 "Servicing hardware INT=0x%02x\n", intno);
1364 do_interrupt_x86_hardirq(env, intno, 1);
1365 break;
1366 #if !defined(CONFIG_USER_ONLY)
1367 case CPU_INTERRUPT_VIRQ:
1368 /* FIXME: this should respect TPR */
1369 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1370 intno = x86_ldl_phys(cs, env->vm_vmcb
1371 + offsetof(struct vmcb, control.int_vector));
1372 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1373 "Servicing virtual hardware INT=0x%02x\n", intno);
1374 do_interrupt_x86_hardirq(env, intno, 1);
1375 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1376 break;
1377 #endif
1380 /* Ensure that no TB jump will be modified as the program flow was changed. */
1381 return true;
1384 void helper_lldt(CPUX86State *env, int selector)
1386 SegmentCache *dt;
1387 uint32_t e1, e2;
1388 int index, entry_limit;
1389 target_ulong ptr;
1391 selector &= 0xffff;
1392 if ((selector & 0xfffc) == 0) {
1393 /* XXX: NULL selector case: invalid LDT */
1394 env->ldt.base = 0;
1395 env->ldt.limit = 0;
1396 } else {
1397 if (selector & 0x4) {
1398 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1400 dt = &env->gdt;
1401 index = selector & ~7;
1402 #ifdef TARGET_X86_64
1403 if (env->hflags & HF_LMA_MASK) {
1404 entry_limit = 15;
1405 } else
1406 #endif
1408 entry_limit = 7;
1410 if ((index + entry_limit) > dt->limit) {
1411 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1413 ptr = dt->base + index;
1414 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1415 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1416 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1417 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1419 if (!(e2 & DESC_P_MASK)) {
1420 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1422 #ifdef TARGET_X86_64
1423 if (env->hflags & HF_LMA_MASK) {
1424 uint32_t e3;
1426 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1427 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1428 env->ldt.base |= (target_ulong)e3 << 32;
1429 } else
1430 #endif
1432 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1435 env->ldt.selector = selector;
1438 void helper_ltr(CPUX86State *env, int selector)
1440 SegmentCache *dt;
1441 uint32_t e1, e2;
1442 int index, type, entry_limit;
1443 target_ulong ptr;
1445 selector &= 0xffff;
1446 if ((selector & 0xfffc) == 0) {
1447 /* NULL selector case: invalid TR */
1448 env->tr.base = 0;
1449 env->tr.limit = 0;
1450 env->tr.flags = 0;
1451 } else {
1452 if (selector & 0x4) {
1453 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1455 dt = &env->gdt;
1456 index = selector & ~7;
1457 #ifdef TARGET_X86_64
1458 if (env->hflags & HF_LMA_MASK) {
1459 entry_limit = 15;
1460 } else
1461 #endif
1463 entry_limit = 7;
1465 if ((index + entry_limit) > dt->limit) {
1466 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1468 ptr = dt->base + index;
1469 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1470 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1471 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1472 if ((e2 & DESC_S_MASK) ||
1473 (type != 1 && type != 9)) {
1474 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1476 if (!(e2 & DESC_P_MASK)) {
1477 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1479 #ifdef TARGET_X86_64
1480 if (env->hflags & HF_LMA_MASK) {
1481 uint32_t e3, e4;
1483 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1484 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1485 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1486 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1488 load_seg_cache_raw_dt(&env->tr, e1, e2);
1489 env->tr.base |= (target_ulong)e3 << 32;
1490 } else
1491 #endif
1493 load_seg_cache_raw_dt(&env->tr, e1, e2);
1495 e2 |= DESC_TSS_BUSY_MASK;
1496 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1498 env->tr.selector = selector;
1501 /* only works in protected mode outside VM86; seg_reg must be != R_CS */
1502 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1504 uint32_t e1, e2;
1505 int cpl, dpl, rpl;
1506 SegmentCache *dt;
1507 int index;
1508 target_ulong ptr;
1510 selector &= 0xffff;
1511 cpl = env->hflags & HF_CPL_MASK;
1512 if ((selector & 0xfffc) == 0) {
1513 /* null selector case */
1514 if (seg_reg == R_SS
1515 #ifdef TARGET_X86_64
1516 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1517 #endif
1519 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1521 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1522 } else {
1524 if (selector & 0x4) {
1525 dt = &env->ldt;
1526 } else {
1527 dt = &env->gdt;
1529 index = selector & ~7;
1530 if ((index + 7) > dt->limit) {
1531 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1533 ptr = dt->base + index;
1534 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1535 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1537 if (!(e2 & DESC_S_MASK)) {
1538 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1540 rpl = selector & 3;
1541 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1542 if (seg_reg == R_SS) {
1543 /* must be writable segment */
1544 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1545 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1547 if (rpl != cpl || dpl != cpl) {
1548 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1550 } else {
1551 /* must be readable segment */
1552 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1553 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1556 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1557 /* if not conforming code, test rights */
1558 if (dpl < cpl || dpl < rpl) {
1559 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1564 if (!(e2 & DESC_P_MASK)) {
1565 if (seg_reg == R_SS) {
1566 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1567 } else {
1568 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1572 /* set the access bit if not already set */
1573 if (!(e2 & DESC_A_MASK)) {
1574 e2 |= DESC_A_MASK;
1575 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1578 cpu_x86_load_seg_cache(env, seg_reg, selector,
1579 get_seg_base(e1, e2),
1580 get_seg_limit(e1, e2),
1581 e2);
1582 #if 0
1583 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1584 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1585 #endif
1589 /* protected mode jump */
1590 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1591 target_ulong next_eip)
1593 int gate_cs, type;
1594 uint32_t e1, e2, cpl, dpl, rpl, limit;
1596 if ((new_cs & 0xfffc) == 0) {
1597 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1599 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1600 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1602 cpl = env->hflags & HF_CPL_MASK;
1603 if (e2 & DESC_S_MASK) {
1604 if (!(e2 & DESC_CS_MASK)) {
1605 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1607 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1608 if (e2 & DESC_C_MASK) {
1609 /* conforming code segment */
1610 if (dpl > cpl) {
1611 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1613 } else {
1614 /* non conforming code segment */
1615 rpl = new_cs & 3;
1616 if (rpl > cpl) {
1617 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1619 if (dpl != cpl) {
1620 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1623 if (!(e2 & DESC_P_MASK)) {
1624 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1626 limit = get_seg_limit(e1, e2);
1627 if (new_eip > limit &&
1628 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1629 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1631 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1632 get_seg_base(e1, e2), limit, e2);
1633 env->eip = new_eip;
1634 } else {
1635 /* jump to call or task gate */
1636 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1637 rpl = new_cs & 3;
1638 cpl = env->hflags & HF_CPL_MASK;
1639 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1641 #ifdef TARGET_X86_64
1642 if (env->efer & MSR_EFER_LMA) {
1643 if (type != 12) {
1644 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1647 #endif
1648 switch (type) {
1649 case 1: /* 286 TSS */
1650 case 9: /* 386 TSS */
1651 case 5: /* task gate */
1652 if (dpl < cpl || dpl < rpl) {
1653 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1655 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1656 break;
1657 case 4: /* 286 call gate */
1658 case 12: /* 386 call gate */
1659 if ((dpl < cpl) || (dpl < rpl)) {
1660 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1662 if (!(e2 & DESC_P_MASK)) {
1663 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1665 gate_cs = e1 >> 16;
1666 new_eip = (e1 & 0xffff);
1667 if (type == 12) {
1668 new_eip |= (e2 & 0xffff0000);
1671 #ifdef TARGET_X86_64
1672 if (env->efer & MSR_EFER_LMA) {
1673 /* load the upper 8 bytes of the 64-bit call gate */
1674 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1675 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1676 GETPC());
1678 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1679 if (type != 0) {
1680 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1681 GETPC());
1683 new_eip |= ((target_ulong)e1) << 32;
1685 #endif
1687 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1688 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1690 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1691 /* must be code segment */
1692 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1693 (DESC_S_MASK | DESC_CS_MASK))) {
1694 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1696 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1697 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1698 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1700 #ifdef TARGET_X86_64
1701 if (env->efer & MSR_EFER_LMA) {
1702 if (!(e2 & DESC_L_MASK)) {
1703 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1705 if (e2 & DESC_B_MASK) {
1706 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1709 #endif
1710 if (!(e2 & DESC_P_MASK)) {
1711 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1713 limit = get_seg_limit(e1, e2);
1714 if (new_eip > limit &&
1715 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1716 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1718 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1719 get_seg_base(e1, e2), limit, e2);
1720 env->eip = new_eip;
1721 break;
1722 default:
1723 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1724 break;
1729 /* real mode call */
1730 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1731 int shift, int next_eip)
1733 int new_eip;
1734 uint32_t esp, esp_mask;
1735 target_ulong ssp;
1737 new_eip = new_eip1;
1738 esp = env->regs[R_ESP];
1739 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1740 ssp = env->segs[R_SS].base;
1741 if (shift) {
1742 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1743 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1744 } else {
1745 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1746 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1749 SET_ESP(esp, esp_mask);
1750 env->eip = new_eip;
1751 env->segs[R_CS].selector = new_cs;
1752 env->segs[R_CS].base = (new_cs << 4);
1755 /* protected mode call */
1756 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1757 int shift, target_ulong next_eip)
1759 int new_stack, i;
1760 uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1761 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1762 uint32_t val, limit, old_sp_mask;
1763 target_ulong ssp, old_ssp, offset, sp;
1765 LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1766 LOG_PCALL_STATE(env_cpu(env));
1767 if ((new_cs & 0xfffc) == 0) {
1768 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1770 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1771 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1773 cpl = env->hflags & HF_CPL_MASK;
1774 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1775 if (e2 & DESC_S_MASK) {
1776 if (!(e2 & DESC_CS_MASK)) {
1777 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1779 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1780 if (e2 & DESC_C_MASK) {
1781 /* conforming code segment */
1782 if (dpl > cpl) {
1783 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1785 } else {
1786 /* non conforming code segment */
1787 rpl = new_cs & 3;
1788 if (rpl > cpl) {
1789 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1791 if (dpl != cpl) {
1792 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1795 if (!(e2 & DESC_P_MASK)) {
1796 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1799 #ifdef TARGET_X86_64
1800 /* XXX: check 16/32 bit cases in long mode */
1801 if (shift == 2) {
1802 target_ulong rsp;
1804 /* 64 bit case */
1805 rsp = env->regs[R_ESP];
1806 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1807 PUSHQ_RA(rsp, next_eip, GETPC());
1808 /* from this point, not restartable */
1809 env->regs[R_ESP] = rsp;
1810 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1811 get_seg_base(e1, e2),
1812 get_seg_limit(e1, e2), e2);
1813 env->eip = new_eip;
1814 } else
1815 #endif
1817 sp = env->regs[R_ESP];
1818 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1819 ssp = env->segs[R_SS].base;
1820 if (shift) {
1821 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1822 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1823 } else {
1824 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1825 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1828 limit = get_seg_limit(e1, e2);
1829 if (new_eip > limit) {
1830 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1832 /* from this point, not restartable */
1833 SET_ESP(sp, sp_mask);
1834 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1835 get_seg_base(e1, e2), limit, e2);
1836 env->eip = new_eip;
1838 } else {
1839 /* check gate type */
1840 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1841 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1842 rpl = new_cs & 3;
1844 #ifdef TARGET_X86_64
1845 if (env->efer & MSR_EFER_LMA) {
1846 if (type != 12) {
1847 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1850 #endif
1852 switch (type) {
1853 case 1: /* available 286 TSS */
1854 case 9: /* available 386 TSS */
1855 case 5: /* task gate */
1856 if (dpl < cpl || dpl < rpl) {
1857 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1859 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1860 return;
1861 case 4: /* 286 call gate */
1862 case 12: /* 386 call gate */
1863 break;
1864 default:
1865 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1866 break;
1868 shift = type >> 3;
1870 if (dpl < cpl || dpl < rpl) {
1871 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1873 /* check valid bit */
1874 if (!(e2 & DESC_P_MASK)) {
1875 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1877 selector = e1 >> 16;
1878 param_count = e2 & 0x1f;
1879 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1880 #ifdef TARGET_X86_64
1881 if (env->efer & MSR_EFER_LMA) {
1882 /* load the upper 8 bytes of the 64-bit call gate */
1883 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1884 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1885 GETPC());
1887 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1888 if (type != 0) {
1889 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1890 GETPC());
1892 offset |= ((target_ulong)e1) << 32;
1894 #endif
1895 if ((selector & 0xfffc) == 0) {
1896 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1899 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1900 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1902 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1903 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1905 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1906 if (dpl > cpl) {
1907 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1909 #ifdef TARGET_X86_64
1910 if (env->efer & MSR_EFER_LMA) {
1911 if (!(e2 & DESC_L_MASK)) {
1912 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1914 if (e2 & DESC_B_MASK) {
1915 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1917 shift++;
1919 #endif
1920 if (!(e2 & DESC_P_MASK)) {
1921 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
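/*
 * Call gate transfer to a more privileged, non-conforming code
 * segment: switch to the stack the TSS defines for the new CPL,
 * push the old SS:ESP there, copy param_count stack words for
 * 16/32-bit gates (64-bit gates carry no parameters), and finally
 * push the return CS:EIP.
 */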
1924 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1925 /* to inner privilege */
1926 #ifdef TARGET_X86_64
1927 if (shift == 2) {
1928 sp = get_rsp_from_tss(env, dpl);
1929 ss = dpl; /* SS = NULL selector with RPL = new CPL */
1930 new_stack = 1;
1931 sp_mask = 0;
1932 ssp = 0; /* SS base is always zero in IA-32e mode */
1933 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1934 TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1935 } else
1936 #endif
1938 uint32_t sp32;
1939 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1940 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1941 TARGET_FMT_lx "\n", ss, sp32, param_count,
1942 env->regs[R_ESP]);
1943 sp = sp32;
1944 if ((ss & 0xfffc) == 0) {
1945 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1947 if ((ss & 3) != dpl) {
1948 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1950 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1951 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1953 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1954 if (ss_dpl != dpl) {
1955 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1957 if (!(ss_e2 & DESC_S_MASK) ||
1958 (ss_e2 & DESC_CS_MASK) ||
1959 !(ss_e2 & DESC_W_MASK)) {
1960 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1962 if (!(ss_e2 & DESC_P_MASK)) {
1963 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1966 sp_mask = get_sp_mask(ss_e2);
1967 ssp = get_seg_base(ss_e1, ss_e2);
1970 /* push_size = ((param_count * 2) + 8) << shift; */
1972 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1973 old_ssp = env->segs[R_SS].base;
1974 #ifdef TARGET_X86_64
1975 if (shift == 2) {
1976 /* XXX: verify that the new stack address is canonical */
1977 PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1978 PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1979 /* parameters aren't supported for 64-bit call gates */
1980 } else
1981 #endif
1982 if (shift == 1) {
1983 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1984 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1985 for (i = param_count - 1; i >= 0; i--) {
1986 val = cpu_ldl_kernel_ra(env, old_ssp +
1987 ((env->regs[R_ESP] + i * 4) &
1988 old_sp_mask), GETPC());
1989 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1991 } else {
1992 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1993 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1994 for (i = param_count - 1; i >= 0; i--) {
1995 val = cpu_lduw_kernel_ra(env, old_ssp +
1996 ((env->regs[R_ESP] + i * 2) &
1997 old_sp_mask), GETPC());
1998 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
2001 new_stack = 1;
2002 } else {
2003 /* to same privilege */
2004 sp = env->regs[R_ESP];
2005 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2006 ssp = env->segs[R_SS].base;
2007 /* push_size = (4 << shift); */
2008 new_stack = 0;
2011 #ifdef TARGET_X86_64
2012 if (shift == 2) {
2013 PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
2014 PUSHQ_RA(sp, next_eip, GETPC());
2015 } else
2016 #endif
2017 if (shift == 1) {
2018 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2019 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
2020 } else {
2021 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2022 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
2025 /* from this point, not restartable */
2027 if (new_stack) {
2028 #ifdef TARGET_X86_64
2029 if (shift == 2) {
2030 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
2031 } else
2032 #endif
2034 ss = (ss & ~3) | dpl;
2035 cpu_x86_load_seg_cache(env, R_SS, ss,
2036 ssp,
2037 get_seg_limit(ss_e1, ss_e2),
2038 ss_e2);
2042 selector = (selector & ~3) | dpl;
2043 cpu_x86_load_seg_cache(env, R_CS, selector,
2044 get_seg_base(e1, e2),
2045 get_seg_limit(e1, e2),
2046 e2);
2047 SET_ESP(sp, sp_mask);
2048 env->eip = offset;
2052 /* real and vm86 mode iret */
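/*
 * Pops EIP, CS and EFLAGS from the SS-relative stack (16-bit or
 * 32-bit frame depending on 'shift') and reloads CS real-mode style
 * (base = selector << 4).  In vm86 mode IOPL is not writable by
 * IRET, so it is excluded from eflags_mask.
 */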
2053 void helper_iret_real(CPUX86State *env, int shift)
2055 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2056 target_ulong ssp;
2057 int eflags_mask;
2059 sp_mask = 0xffff; /* XXX: use SS segment size? */
2060 sp = env->regs[R_ESP];
2061 ssp = env->segs[R_SS].base;
2062 if (shift == 1) {
2063 /* 32 bits */
2064 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2065 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2066 new_cs &= 0xffff;
2067 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2068 } else {
2069 /* 16 bits */
2070 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2071 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2072 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2074 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2075 env->segs[R_CS].selector = new_cs;
2076 env->segs[R_CS].base = (new_cs << 4);
2077 env->eip = new_eip;
2078 if (env->eflags & VM_MASK) {
2079 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2080 NT_MASK;
2081 } else {
2082 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2083 RF_MASK | NT_MASK;
2085 if (shift == 0) {
2086 eflags_mask &= 0xffff;
2088 cpu_load_eflags(env, new_eflags, eflags_mask);
2089 env->hflags2 &= ~HF2_NMI_MASK;
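/*
 * After a return to an outer privilege level, data segment registers
 * that are more privileged than the new CPL must not stay usable;
 * validate_seg() clears the cached descriptor in that case.
 */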
2092 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2094 int dpl;
2095 uint32_t e2;
2097 /* XXX: on x86_64, we do not want to nullify FS and GS because
2098 they may still contain a valid base; it is unclear how a real
2099 x86_64 CPU behaves here. */
2100 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2101 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2102 return;
2105 e2 = env->segs[seg_reg].flags;
2106 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2107 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2108 /* data or non-conforming code segment */
2109 if (dpl < cpl) {
2110 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2115 /* protected mode iret and lret */
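/*
 * Common tail of RETF and IRET in protected mode: pop the return
 * frame (EIP, CS and, for IRET, EFLAGS), optionally pop SS:ESP when
 * returning to an outer privilege level, and divert to
 * return_to_vm86 if the popped EFLAGS has VM set.  'addend' is the
 * extra stack adjustment requested by RETF imm16.
 */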
2116 static inline void helper_ret_protected(CPUX86State *env, int shift,
2117 int is_iret, int addend,
2118 uintptr_t retaddr)
2120 uint32_t new_cs, new_eflags, new_ss;
2121 uint32_t new_es, new_ds, new_fs, new_gs;
2122 uint32_t e1, e2, ss_e1, ss_e2;
2123 int cpl, dpl, rpl, eflags_mask, iopl;
2124 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2126 #ifdef TARGET_X86_64
2127 if (shift == 2) {
2128 sp_mask = -1;
2129 } else
2130 #endif
2132 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2134 sp = env->regs[R_ESP];
2135 ssp = env->segs[R_SS].base;
2136 new_eflags = 0; /* avoid warning */
2137 #ifdef TARGET_X86_64
2138 if (shift == 2) {
2139 POPQ_RA(sp, new_eip, retaddr);
2140 POPQ_RA(sp, new_cs, retaddr);
2141 new_cs &= 0xffff;
2142 if (is_iret) {
2143 POPQ_RA(sp, new_eflags, retaddr);
2145 } else
2146 #endif
2148 if (shift == 1) {
2149 /* 32 bits */
2150 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2151 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2152 new_cs &= 0xffff;
2153 if (is_iret) {
2154 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2155 if (new_eflags & VM_MASK) {
2156 goto return_to_vm86;
2159 } else {
2160 /* 16 bits */
2161 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2162 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2163 if (is_iret) {
2164 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2168 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2169 new_cs, new_eip, shift, addend);
2170 LOG_PCALL_STATE(env_cpu(env));
2171 if ((new_cs & 0xfffc) == 0) {
2172 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2174 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2175 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2177 if (!(e2 & DESC_S_MASK) ||
2178 !(e2 & DESC_CS_MASK)) {
2179 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2181 cpl = env->hflags & HF_CPL_MASK;
2182 rpl = new_cs & 3;
2183 if (rpl < cpl) {
2184 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2186 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2187 if (e2 & DESC_C_MASK) {
2188 if (dpl > rpl) {
2189 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2191 } else {
2192 if (dpl != rpl) {
2193 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2196 if (!(e2 & DESC_P_MASK)) {
2197 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2200 sp += addend;
2201 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2202 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2203 /* return to same privilege level */
2204 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2205 get_seg_base(e1, e2),
2206 get_seg_limit(e1, e2),
2207 e2);
2208 } else {
2209 /* return to different privilege level */
2210 #ifdef TARGET_X86_64
2211 if (shift == 2) {
2212 POPQ_RA(sp, new_esp, retaddr);
2213 POPQ_RA(sp, new_ss, retaddr);
2214 new_ss &= 0xffff;
2215 } else
2216 #endif
2218 if (shift == 1) {
2219 /* 32 bits */
2220 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2221 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2222 new_ss &= 0xffff;
2223 } else {
2224 /* 16 bits */
2225 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2226 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2229 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2230 new_ss, new_esp);
2231 if ((new_ss & 0xfffc) == 0) {
2232 #ifdef TARGET_X86_64
2233 /* NULL ss is allowed in long mode if cpl != 3 */
2234 /* XXX: test CS64? */
2235 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2236 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2237 0, 0xffffffff,
2238 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2239 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2240 DESC_W_MASK | DESC_A_MASK);
2241 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2242 } else
2243 #endif
2245 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2247 } else {
2248 if ((new_ss & 3) != rpl) {
2249 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2251 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2252 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2254 if (!(ss_e2 & DESC_S_MASK) ||
2255 (ss_e2 & DESC_CS_MASK) ||
2256 !(ss_e2 & DESC_W_MASK)) {
2257 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2259 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2260 if (dpl != rpl) {
2261 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2263 if (!(ss_e2 & DESC_P_MASK)) {
2264 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2266 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2267 get_seg_base(ss_e1, ss_e2),
2268 get_seg_limit(ss_e1, ss_e2),
2269 ss_e2);
2272 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2273 get_seg_base(e1, e2),
2274 get_seg_limit(e1, e2),
2275 e2);
2276 sp = new_esp;
2277 #ifdef TARGET_X86_64
2278 if (env->hflags & HF_CS64_MASK) {
2279 sp_mask = -1;
2280 } else
2281 #endif
2283 sp_mask = get_sp_mask(ss_e2);
2286 /* validate data segments */
2287 validate_seg(env, R_ES, rpl);
2288 validate_seg(env, R_DS, rpl);
2289 validate_seg(env, R_FS, rpl);
2290 validate_seg(env, R_GS, rpl);
2292 sp += addend;
2294 SET_ESP(sp, sp_mask);
2295 env->eip = new_eip;
2296 if (is_iret) {
2297 /* NOTE: 'cpl' is the _old_ CPL */
2298 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2299 if (cpl == 0) {
2300 eflags_mask |= IOPL_MASK;
2302 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2303 if (cpl <= iopl) {
2304 eflags_mask |= IF_MASK;
2306 if (shift == 0) {
2307 eflags_mask &= 0xffff;
2309 cpu_load_eflags(env, new_eflags, eflags_mask);
2311 return;
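/*
 * IRET with VM set in the popped EFLAGS: pop ESP, SS, ES, DS, FS and
 * GS from the 32-bit frame and resume in vm86 mode, loading the
 * segment registers real-mode style via load_seg_vm().
 */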
2313 return_to_vm86:
2314 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2315 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2316 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2317 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2318 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2319 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2321 /* modify processor state */
2322 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2323 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2324 VIP_MASK);
2325 load_seg_vm(env, R_CS, new_cs & 0xffff);
2326 load_seg_vm(env, R_SS, new_ss & 0xffff);
2327 load_seg_vm(env, R_ES, new_es & 0xffff);
2328 load_seg_vm(env, R_DS, new_ds & 0xffff);
2329 load_seg_vm(env, R_FS, new_fs & 0xffff);
2330 load_seg_vm(env, R_GS, new_gs & 0xffff);
2332 env->eip = new_eip & 0xffff;
2333 env->regs[R_ESP] = new_esp;
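/*
 * IRET entry point.  If NT is set, the return is a task switch back
 * to the TSS named by the back-link field at offset 0 of the current
 * TSS; this form is not allowed in long mode.  Otherwise fall
 * through to the common protected-mode return path.
 */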
2336 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2338 int tss_selector, type;
2339 uint32_t e1, e2;
2341 /* specific case: IRET with NT set returns to the previous task */
2342 if (env->eflags & NT_MASK) {
2343 #ifdef TARGET_X86_64
2344 if (env->hflags & HF_LMA_MASK) {
2345 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2347 #endif
2348 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2349 if (tss_selector & 4) {
2350 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2352 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2353 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2355 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2356 /* NOTE: the type check requires a TSS descriptor with the busy bit set */
2357 if (type != 3) {
2358 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2360 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2361 } else {
2362 helper_ret_protected(env, shift, 1, 0, GETPC());
2364 env->hflags2 &= ~HF2_NMI_MASK;
2367 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2369 helper_ret_protected(env, shift, 0, addend, GETPC());
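/*
 * SYSENTER: CS and SS are derived from IA32_SYSENTER_CS
 * (sysenter_cs and sysenter_cs + 8), while ESP and EIP come from
 * IA32_SYSENTER_ESP/EIP.  A zero SYSENTER_CS means the MSRs were
 * never initialized, so the instruction raises #GP(0).
 */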
2372 void helper_sysenter(CPUX86State *env)
2374 if (env->sysenter_cs == 0) {
2375 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2377 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2379 #ifdef TARGET_X86_64
2380 if (env->hflags & HF_LMA_MASK) {
2381 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2382 0, 0xffffffff,
2383 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2384 DESC_S_MASK |
2385 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2386 DESC_L_MASK);
2387 } else
2388 #endif
2390 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2391 0, 0xffffffff,
2392 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2393 DESC_S_MASK |
2394 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2396 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2397 0, 0xffffffff,
2398 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2399 DESC_S_MASK |
2400 DESC_W_MASK | DESC_A_MASK);
2401 env->regs[R_ESP] = env->sysenter_esp;
2402 env->eip = env->sysenter_eip;
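/*
 * SYSEXIT: returns to CPL 3 using selectors at fixed offsets from
 * SYSENTER_CS (+16/+24 for a 32-bit return, +32/+40 when dflag == 2
 * in long mode), with ESP taken from ECX and EIP from EDX.
 */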
2405 void helper_sysexit(CPUX86State *env, int dflag)
2407 int cpl;
2409 cpl = env->hflags & HF_CPL_MASK;
2410 if (env->sysenter_cs == 0 || cpl != 0) {
2411 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2413 #ifdef TARGET_X86_64
2414 if (dflag == 2) {
2415 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2416 3, 0, 0xffffffff,
2417 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2418 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2419 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2420 DESC_L_MASK);
2421 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2422 3, 0, 0xffffffff,
2423 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2424 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2425 DESC_W_MASK | DESC_A_MASK);
2426 } else
2427 #endif
2429 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2430 3, 0, 0xffffffff,
2431 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2432 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2433 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2434 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2435 3, 0, 0xffffffff,
2436 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2437 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2438 DESC_W_MASK | DESC_A_MASK);
2440 env->regs[R_ESP] = env->regs[R_ECX];
2441 env->eip = env->regs[R_EDX];
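/*
 * LSL: if the descriptor's limit is readable at the current CPL/RPL,
 * return the expanded limit and set ZF; otherwise clear ZF and
 * return 0.  ZF is conveyed through CC_SRC.
 */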
2444 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2446 unsigned int limit;
2447 uint32_t e1, e2, eflags, selector;
2448 int rpl, dpl, cpl, type;
2450 selector = selector1 & 0xffff;
2451 eflags = cpu_cc_compute_all(env, CC_OP);
2452 if ((selector & 0xfffc) == 0) {
2453 goto fail;
2455 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2456 goto fail;
2458 rpl = selector & 3;
2459 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2460 cpl = env->hflags & HF_CPL_MASK;
2461 if (e2 & DESC_S_MASK) {
2462 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2463 /* conforming */
2464 } else {
2465 if (dpl < cpl || dpl < rpl) {
2466 goto fail;
2469 } else {
2470 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2471 switch (type) {
2472 case 1:
2473 case 2:
2474 case 3:
2475 case 9:
2476 case 11:
2477 break;
2478 default:
2479 goto fail;
2481 if (dpl < cpl || dpl < rpl) {
2482 fail:
2483 CC_SRC = eflags & ~CC_Z;
2484 return 0;
2487 limit = get_seg_limit(e1, e2);
2488 CC_SRC = eflags | CC_Z;
2489 return limit;
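/*
 * LAR: like LSL, but returns the access-rights bytes of the
 * descriptor (second dword masked with 0x00f0ff00) and also accepts
 * call gates and task gates.
 */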
2492 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2494 uint32_t e1, e2, eflags, selector;
2495 int rpl, dpl, cpl, type;
2497 selector = selector1 & 0xffff;
2498 eflags = cpu_cc_compute_all(env, CC_OP);
2499 if ((selector & 0xfffc) == 0) {
2500 goto fail;
2502 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2503 goto fail;
2505 rpl = selector & 3;
2506 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2507 cpl = env->hflags & HF_CPL_MASK;
2508 if (e2 & DESC_S_MASK) {
2509 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2510 /* conforming */
2511 } else {
2512 if (dpl < cpl || dpl < rpl) {
2513 goto fail;
2516 } else {
2517 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2518 switch (type) {
2519 case 1:
2520 case 2:
2521 case 3:
2522 case 4:
2523 case 5:
2524 case 9:
2525 case 11:
2526 case 12:
2527 break;
2528 default:
2529 goto fail;
2531 if (dpl < cpl || dpl < rpl) {
2532 fail:
2533 CC_SRC = eflags & ~CC_Z;
2534 return 0;
2537 CC_SRC = eflags | CC_Z;
2538 return e2 & 0x00f0ff00;
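/*
 * VERR/VERW: set ZF if the segment is readable/writable at the
 * current CPL and RPL; system descriptors always fail.  Only the
 * flags are updated, nothing is loaded.
 */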
2541 void helper_verr(CPUX86State *env, target_ulong selector1)
2543 uint32_t e1, e2, eflags, selector;
2544 int rpl, dpl, cpl;
2546 selector = selector1 & 0xffff;
2547 eflags = cpu_cc_compute_all(env, CC_OP);
2548 if ((selector & 0xfffc) == 0) {
2549 goto fail;
2551 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2552 goto fail;
2554 if (!(e2 & DESC_S_MASK)) {
2555 goto fail;
2557 rpl = selector & 3;
2558 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2559 cpl = env->hflags & HF_CPL_MASK;
2560 if (e2 & DESC_CS_MASK) {
2561 if (!(e2 & DESC_R_MASK)) {
2562 goto fail;
2564 if (!(e2 & DESC_C_MASK)) {
2565 if (dpl < cpl || dpl < rpl) {
2566 goto fail;
2569 } else {
2570 if (dpl < cpl || dpl < rpl) {
2571 fail:
2572 CC_SRC = eflags & ~CC_Z;
2573 return;
2576 CC_SRC = eflags | CC_Z;
2579 void helper_verw(CPUX86State *env, target_ulong selector1)
2581 uint32_t e1, e2, eflags, selector;
2582 int rpl, dpl, cpl;
2584 selector = selector1 & 0xffff;
2585 eflags = cpu_cc_compute_all(env, CC_OP);
2586 if ((selector & 0xfffc) == 0) {
2587 goto fail;
2589 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2590 goto fail;
2592 if (!(e2 & DESC_S_MASK)) {
2593 goto fail;
2595 rpl = selector & 3;
2596 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2597 cpl = env->hflags & HF_CPL_MASK;
2598 if (e2 & DESC_CS_MASK) {
2599 goto fail;
2600 } else {
2601 if (dpl < cpl || dpl < rpl) {
2602 goto fail;
2604 if (!(e2 & DESC_W_MASK)) {
2605 fail:
2606 CC_SRC = eflags & ~CC_Z;
2607 return;
2610 CC_SRC = eflags | CC_Z;
2613 #if defined(CONFIG_USER_ONLY)
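/*
 * User-mode only: in real or vm86 mode a segment load simply sets
 * base = selector << 4 with a 64 KiB limit; in protected mode it
 * goes through the full helper_load_seg() checks.
 */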
2614 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2616 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2617 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2618 selector &= 0xffff;
2619 cpu_x86_load_seg_cache(env, seg_reg, selector,
2620 (selector << 4), 0xffff,
2621 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2622 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2623 } else {
2624 helper_load_seg(env, seg_reg, selector);
2627 #endif
2629 /* check if port I/O is allowed by the TSS I/O permission bitmap */
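/*
 * The I/O permission bitmap lives in the 32-bit TSS: the 16-bit word
 * at offset 0x66 gives the bitmap base, and each port owns one bit.
 * Two bytes are read so that an access spanning a byte boundary is
 * still covered; every bit for the accessed ports must be clear.
 */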
2630 static inline void check_io(CPUX86State *env, int addr, int size,
2631 uintptr_t retaddr)
2633 int io_offset, val, mask;
2635 /* TSS must be a valid 32-bit one */
2636 if (!(env->tr.flags & DESC_P_MASK) ||
2637 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2638 env->tr.limit < 103) {
2639 goto fail;
2641 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2642 io_offset += (addr >> 3);
2643 /* Note: the check needs two bytes */
2644 if ((io_offset + 1) > env->tr.limit) {
2645 goto fail;
2647 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2648 val >>= (addr & 7);
2649 mask = (1 << size) - 1;
2650 /* all bits must be zero to allow the I/O */
2651 if ((val & mask) != 0) {
2652 fail:
2653 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2657 void helper_check_iob(CPUX86State *env, uint32_t t0)
2659 check_io(env, t0, 1, GETPC());
2662 void helper_check_iow(CPUX86State *env, uint32_t t0)
2664 check_io(env, t0, 2, GETPC());
2667 void helper_check_iol(CPUX86State *env, uint32_t t0)
2669 check_io(env, t0, 4, GETPC());