1 /*
2 * KQEMU
4 * Copyright (C) 2004-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19 #ifdef __x86_64__
20 register unsigned long pc asm("%r12");
21 #else
22 register unsigned long pc asm("%esi");
23 #endif
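/* Note: the interpreter keeps the current guest instruction pointer in a
   fixed call-preserved host register (r12 on x86_64, esi on i386) so that
   it stays live across calls into the helpers below. */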
25 #include "kqemu_int.h"
27 /*
28 * TODO:
29 * - do not use cs.base for CS64 code
30 * - test all segment limits in 16/32 bit mode
31 */
33 //#define DEBUG_LRET
34 //#define DEBUG_INTERP
35 //#define DEBUG_SEG
37 #ifdef USE_HARD_MMU
38 static inline uint32_t lduw_kernel1(struct kqemu_state *s, unsigned long addr)
40 if (likely(s->cpu_state.cpl != 3)) {
41 return lduw_mem(s, addr);
42 } else {
43 return lduw_fast(s, addr, 0);
47 static inline uint32_t ldl_kernel1(struct kqemu_state *s, unsigned long addr)
49 if (likely(s->cpu_state.cpl != 3)) {
50 return ldl_mem(s, addr);
51 } else {
52 return ldl_fast(s, addr, 0);
56 #if defined (__x86_64__)
57 static inline uint64_t ldq_kernel1(struct kqemu_state *s, unsigned long addr)
59 if (likely(s->cpu_state.cpl != 3)) {
60 return ldq_mem(s, addr);
61 } else {
62 return ldq_fast(s, addr, 0);
65 #endif
67 static inline void stw_kernel1(struct kqemu_state *s, unsigned long addr, uint32_t val)
69 if (likely(s->cpu_state.cpl != 3)) {
70 return stw_mem(s, addr, val);
71 } else {
72 return stw_fast(s, addr, val, 0);
76 static inline void stl_kernel1(struct kqemu_state *s, unsigned long addr, uint32_t val)
78 if (likely(s->cpu_state.cpl != 3)) {
79 return stl_mem(s, addr, val);
80 } else {
81 return stl_fast(s, addr, val, 0);
85 #if defined (__x86_64__)
86 static inline void stq_kernel1(struct kqemu_state *s, unsigned long addr, uint64_t val)
88 if (likely(s->cpu_state.cpl != 3)) {
89 return stq_mem(s, addr, val);
90 } else {
91 return stq_fast(s, addr, val, 0);
94 #endif
96 #define ldq_kernel(addr) ldq_kernel1(s, addr)
97 #define ldl_kernel(addr) ldl_kernel1(s, addr)
98 #define lduw_kernel(addr) lduw_kernel1(s, addr)
99 #define stq_kernel(addr, val) stq_kernel1(s, addr, val)
100 #define stl_kernel(addr, val) stl_kernel1(s, addr, val)
101 #define stw_kernel(addr, val) stw_kernel1(s, addr, val)
103 #define ldub(s, addr) ldub_mem(s, addr)
104 #define lduw(s, addr) lduw_mem(s, addr)
105 #define ldl(s, addr) ldl_mem(s, addr)
106 #define ldq(s, addr) ldq_mem(s, addr)
107 #define stb(s, addr, val) stb_mem(s, addr, val)
108 #define stw(s, addr, val) stw_mem(s, addr, val)
109 #define stl(s, addr, val) stl_mem(s, addr, val)
110 #define stq(s, addr, val) stq_mem(s, addr, val)
111 #else
112 #define ldq_kernel(addr) ldq_fast(s, addr, 0)
113 #define ldl_kernel(addr) ldl_fast(s, addr, 0)
114 #define lduw_kernel(addr) lduw_fast(s, addr, 0)
115 #define stq_kernel(addr, val) stq_fast(s, addr, val, 0)
116 #define stl_kernel(addr, val) stl_fast(s, addr, val, 0)
117 #define stw_kernel(addr, val) stw_fast(s, addr, val, 0)
119 #define ldub(s, addr) ldub_fast(s, addr, (s->cpu_state.cpl == 3))
120 #define lduw(s, addr) lduw_fast(s, addr, (s->cpu_state.cpl == 3))
121 #define ldl(s, addr) ldl_fast(s, addr, (s->cpu_state.cpl == 3))
122 #define ldq(s, addr) ldq_fast(s, addr, (s->cpu_state.cpl == 3))
123 #define stb(s, addr, val) stb_fast(s, addr, val, (s->cpu_state.cpl == 3))
124 #define stw(s, addr, val) stw_fast(s, addr, val, (s->cpu_state.cpl == 3))
125 #define stl(s, addr, val) stl_fast(s, addr, val, (s->cpu_state.cpl == 3))
126 #define stq(s, addr, val) stq_fast(s, addr, val, (s->cpu_state.cpl == 3))
127 #endif /* !USE_HARD_MMU */
129 #ifdef __x86_64__
130 #define CODE64(s) ((s)->cpu_state.segs[R_CS].flags & DESC_L_MASK)
131 #define REX_R(s) ((s)->rex_r)
132 #define REX_X(s) ((s)->rex_x)
133 #define REX_B(s) ((s)->rex_b)
134 #else
135 #define CODE64(s) 0
136 #define REX_R(s) 0
137 #define REX_X(s) 0
138 #define REX_B(s) 0
139 #endif
141 #define PREFIX_REPZ 0x01
142 #define PREFIX_REPNZ 0x02
143 #define PREFIX_LOCK 0x04
144 #define PREFIX_REX 0x08
146 static inline unsigned int get_sp_mask(unsigned int e2)
148 if (e2 & DESC_B_MASK)
149 return 0xffffffff;
150 else
151 return 0xffff;
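/* DESC_B_MASK in the SS descriptor flags selects a 32-bit stack (mask
   0xffffffff) versus a 16-bit stack (mask 0xffff); the push/pop helpers
   below apply this mask to the stack pointer before adding the SS base. */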
154 /* XXX: add an is_user flag to have proper security support */
155 #define PUSHW(ssp, sp, sp_mask, val)\
156 {\
157 sp -= 2;\
158 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
159 }
161 #define PUSHL(ssp, sp, sp_mask, val)\
162 {\
163 sp -= 4;\
164 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
165 }
167 #define POPW(ssp, sp, sp_mask, val)\
168 {\
169 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
170 sp += 2;\
171 }
173 #define POPL(ssp, sp, sp_mask, val)\
174 {\
175 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
176 sp += 4;\
177 }
179 #define PUSHQ(sp, val)\
180 {\
181 sp -= 8;\
182 stq_kernel(sp, (val));\
183 }
185 #define POPQ(sp, val)\
186 {\
187 val = ldq_kernel(sp);\
188 sp += 8;\
189 }
191 #define ESP (s->regs1.esp)
192 #define EIP (s->regs1.eip)
194 static inline unsigned int get_seg_sel(struct kqemu_state *s, int seg_reg)
196 unsigned int val;
197 switch(seg_reg) {
198 case R_CS:
199 val = (s->regs1.cs_sel & ~3) | s->cpu_state.cpl;
200 break;
201 case R_SS:
202 val = (s->regs1.ss_sel & ~3) | s->cpu_state.cpl;
203 break;
204 #ifdef __x86_64__
205 case R_DS:
206 asm volatile ("mov %%ds, %0" : "=r" (val));
207 val &= 0xffff; /* XXX: see if it is really necessary */
208 break;
209 case R_ES:
210 asm volatile ("mov %%es, %0" : "=r" (val));
211 val &= 0xffff; /* XXX: see if it is really necessary */
212 break;
213 #else
214 case R_DS:
215 val = s->regs1.ds_sel;
216 break;
217 case R_ES:
218 val = s->regs1.es_sel;
219 break;
220 #endif
221 case R_FS:
222 asm volatile ("mov %%fs, %0" : "=r" (val));
223 val &= 0xffff; /* XXX: see if it is really necessary */
224 break;
225 default:
226 case R_GS:
227 asm volatile ("mov %%gs, %0" : "=r" (val));
228 val &= 0xffff; /* XXX: see if it is really necessary */
229 break;
231 return val;
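/* CS and SS selectors are shadowed in regs1 with their RPL forced to the
   guest CPL; the data-segment selectors are read back from the live
   segment registers on x86_64 and from regs1 on i386. */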
234 #ifdef USE_SEG_GP
235 static inline void set_seg_desc_cache(struct kqemu_state *s,
236 int seg_reg)
238 struct kqemu_segment_cache *sc;
239 uint32_t e1, e2;
240 unsigned long base, limit;
242 sc = &s->cpu_state.segs[seg_reg];
243 limit = sc->limit;
244 base = sc->base;
245 e2 = (sc->flags & 0x0070ff00) | (3 << DESC_DPL_SHIFT) |
246 DESC_S_MASK | DESC_A_MASK;
247 if (limit > 0xfffff) {
248 limit >>= 12;
249 e2 |= DESC_G_MASK;
251 e1 = (base << 16) | (limit & 0xffff);
252 e2 |= ((base >> 16) & 0xff) | (base & 0xff000000) | (limit & 0x000f0000);
253 s->seg_desc_cache[seg_reg][0] = e1;
254 s->seg_desc_cache[seg_reg][1] = e2;
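/* Rebuilds the raw descriptor words e1/e2 from the cached base/limit/flags,
   forcing DPL=3 and the S/A bits and setting the G bit when the limit does
   not fit in 20 bits; the result is kept in seg_desc_cache for
   set_descriptor_entry() below. */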
257 /* seg_reg must be R_CS or R_SS */
258 static inline void set_descriptor_entry(struct kqemu_state *s,
259 int seg_reg, int selector)
261 uint32_t sel;
262 uint8_t *ptr;
264 /* reset the previous one */
265 sel = s->seg_desc_entries[seg_reg - R_CS];
266 ptr = (uint8_t *)s->dt_table + sel;
267 *(uint64_t *)(ptr) = 0;
269 if ((selector & 0xfffc) != 0) {
270 sel = (selector & ~7) | ((selector & 4) << 14);
271 ptr = (uint8_t *)s->dt_table + sel;
272 *(uint32_t *)(ptr) = s->seg_desc_cache[seg_reg][0];
273 *(uint32_t *)(ptr + 4) = s->seg_desc_cache[seg_reg][1];
274 } else {
275 sel = 0;
277 s->seg_desc_entries[seg_reg - R_CS] = sel;
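/* Installs the cached descriptor into the monitor's shadow descriptor
   table: the previously used slot is cleared first, and LDT selectors
   (bit 2 set) are mapped into the upper half of dt_table via
   (selector & 4) << 14. */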
279 #endif
281 /* NOTE: in the interpreter we only need the base value and flags for
282 CS and SS. The selector itself is stored in its real location (either the
283 hardware segment register or regs1) */
284 static void cpu_x86_load_seg_cache(struct kqemu_state *s,
285 int seg_reg, unsigned int selector,
286 uint32_t base, unsigned int limit,
287 uint32_t e1, uint32_t e2)
289 struct kqemu_segment_cache *sc;
290 #if 0
291 monitor_log(s, "%08x: load_seg_cache seg_reg=%d sel=0x%04x e2=0x%08x\n",
292 s->regs1.eip, seg_reg, selector, e2);
293 #endif
294 sc = &s->cpu_state.segs[seg_reg];
295 sc->flags = e2;
296 sc->base = base;
297 sc->limit = limit;
299 /* update CPU state if needed */
300 #ifdef USE_SEG_GP
301 if (s->cpu_state.cpl != 3) {
302 switch(seg_reg) {
303 case R_CS:
304 s->regs1.cs_sel = selector | 3;
305 set_seg_desc_cache(s, R_CS);
306 set_descriptor_entry(s, R_CS, selector);
307 break;
308 case R_SS:
309 s->regs1.ss_sel = selector | 3;
310 set_seg_desc_cache(s, R_SS);
311 set_descriptor_entry(s, R_SS, selector);
312 break;
313 #ifdef __x86_64__
314 case R_DS:
315 set_seg_desc_cache(s, R_DS);
316 set_cpu_seg_cache(s, R_DS, selector);
317 break;
318 case R_ES:
319 set_seg_desc_cache(s, R_ES);
320 set_cpu_seg_cache(s, R_ES, selector);
321 break;
322 #else
323 case R_DS:
324 s->regs1.ds_sel = selector;
325 set_seg_desc_cache(s, R_DS);
326 break;
327 case R_ES:
328 s->regs1.es_sel = selector;
329 set_seg_desc_cache(s, R_ES);
330 break;
331 #endif
332 case R_FS:
333 set_seg_desc_cache(s, R_FS);
334 set_cpu_seg_cache(s, R_FS, selector);
335 break;
336 case R_GS:
337 set_seg_desc_cache(s, R_GS);
338 set_cpu_seg_cache(s, R_GS, selector);
339 break;
341 } else
342 #endif
344 switch(seg_reg) {
345 case R_CS:
346 s->regs1.cs_sel = selector | 3;
347 break;
348 case R_SS:
349 s->regs1.ss_sel = selector | 3;
350 break;
351 #ifdef __x86_64__
352 case R_DS:
353 LOAD_SEG(ds, selector);
354 break;
355 case R_ES:
356 LOAD_SEG(es, selector);
357 break;
358 #else
359 case R_DS:
360 s->regs1.ds_sel = selector;
361 break;
362 case R_ES:
363 s->regs1.es_sel = selector;
364 break;
365 #endif
366 case R_FS:
367 LOAD_SEG(fs, selector);
368 break;
369 case R_GS:
370 LOAD_SEG(gs, selector);
371 break;
376 void update_seg_desc_caches(struct kqemu_state *s)
378 #ifdef USE_SEG_GP
379 if (s->cpu_state.cpl != 3) {
380 /* update the seg caches */
381 set_seg_desc_cache(s, R_CS);
382 set_descriptor_entry(s, R_CS, s->regs1.cs_sel);
384 set_seg_desc_cache(s, R_SS);
385 set_descriptor_entry(s, R_SS, s->regs1.ss_sel);
387 set_seg_desc_cache(s, R_DS);
388 set_seg_desc_cache(s, R_ES);
389 set_seg_desc_cache(s, R_FS);
390 set_seg_desc_cache(s, R_GS);
392 #endif
395 #define REG_PTR(reg) (&s->regs1.eax + (reg))
397 static inline unsigned long get_regb(struct kqemu_state *s, int reg)
399 unsigned long val;
400 #ifdef __x86_64__
401 if (s->prefix & PREFIX_REX) {
402 val = *(uint8_t *)REG_PTR(reg);
403 } else
404 #endif
406 val = *((uint8_t *)REG_PTR(reg & 3) + (reg >> 2));
408 return val;
411 static inline unsigned long get_reg(struct kqemu_state *s, int reg)
413 return *(unsigned long *)REG_PTR(reg);
416 static inline void set_reg(struct kqemu_state *s, int reg, unsigned long val)
418 *(unsigned long *)REG_PTR(reg) = val;
421 static inline void set_regl(struct kqemu_state *s, int reg, uint32_t val)
423 *(unsigned long *)REG_PTR(reg) = val;
426 static inline void set_regw(struct kqemu_state *s, int reg, uint32_t val)
428 *(uint16_t *)REG_PTR(reg) = val;
431 static inline void set_regb(struct kqemu_state *s, int reg, uint32_t val)
433 #ifdef __x86_64__
434 if (s->prefix & PREFIX_REX) {
435 *(uint8_t *)REG_PTR(reg) = val;
436 } else
437 #endif
439 *((uint8_t *)REG_PTR(reg & 3) + (reg >> 2)) = val;
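/* Without a REX prefix, byte registers 4-7 are AH/CH/DH/BH, i.e. byte 1 of
   EAX/ECX/EDX/EBX, which is what the (reg & 3) / (reg >> 2) addressing in
   get_regb() and set_regb() above computes. */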
443 static inline unsigned long ldS(struct kqemu_state *s, int bsize,
444 unsigned long addr)
446 unsigned long val;
447 switch(bsize) {
448 case 0:
449 val = ldub(s, addr);
450 break;
451 case 1:
452 val = lduw(s, addr);
453 break;
454 #ifndef __x86_64__
455 default:
456 #endif
457 case 2:
458 val = ldl(s, addr);
459 break;
460 #ifdef __x86_64__
461 default:
462 case 3:
463 val = ldq(s, addr);
464 break;
465 #endif
467 return val;
470 static inline void stS(struct kqemu_state *s, int bsize, unsigned long addr,
471 unsigned long val)
473 switch(bsize) {
474 case 0:
475 stb(s, addr, val);
476 break;
477 case 1:
478 stw(s, addr, val);
479 break;
480 #ifndef __x86_64__
481 default:
482 #endif
483 case 2:
484 stl(s, addr, val);
485 break;
486 #ifdef __x86_64__
487 default:
488 case 3:
489 stq(s, addr, val);
490 break;
491 #endif
495 static inline unsigned long get_regS(struct kqemu_state *s, int bsize,
496 int reg)
498 unsigned long val;
499 if (bsize == 0) {
500 val = get_regb(s, reg);
501 } else {
502 val = get_reg(s, reg);
504 return val;
506 #ifdef __x86_64__
507 #define QO(x...) x
508 #else
509 #define QO(x...)
510 #endif
513 static inline void set_regS(struct kqemu_state *s, int bsize,
514 int reg, unsigned long val)
516 if (bsize == 0) {
517 set_regb(s, reg, val);
518 } else if (bsize == 1) {
519 *(uint16_t *)REG_PTR(reg) = val;
521 #ifdef __x86_64__
522 else if (bsize == 3) {
523 *(unsigned long *)REG_PTR(reg) = val;
525 #endif
526 else {
527 *(unsigned long *)REG_PTR(reg) = (uint32_t)val;
532 static inline unsigned long stack_pop(struct kqemu_state *s)
534 unsigned long addr, sp_mask, val;
536 #ifdef __x86_64__
537 if (CODE64(s)) {
538 addr = s->regs1.esp;
539 if (s->dflag) {
540 val = ldq(s, addr);
541 } else {
542 val = lduw(s, addr);
544 } else
545 #endif
547 sp_mask = get_sp_mask(s->cpu_state.segs[R_SS].flags);
548 addr = (s->regs1.esp & sp_mask) + s->cpu_state.segs[R_SS].base;
549 if (s->dflag) {
550 val = ldl(s, addr);
551 } else {
552 val = lduw(s, addr);
555 return val;
558 static inline void sp_add(struct kqemu_state *s, long addend)
560 #ifdef __x86_64__
561 if (CODE64(s)) {
562 s->regs1.esp += addend;
563 } else
564 #endif
566 if (s->cpu_state.segs[R_SS].flags & DESC_B_MASK)
567 s->regs1.esp = (uint32_t)(s->regs1.esp + addend);
568 else
569 *(uint16_t *)&s->regs1.esp += addend;
573 static inline void stack_pop_update(struct kqemu_state *s)
575 int val;
576 #ifdef __x86_64__
577 if (CODE64(s)) {
578 if (s->dflag) {
579 val = 8;
580 } else {
581 val = 2;
583 } else
584 #endif
586 val = 2 << s->dflag;
588 sp_add(s, val);
592 static inline void stack_pushS(struct kqemu_state *s, unsigned long val,
593 int dflag)
595 unsigned long addr, sp_mask, sp;
597 #ifdef __x86_64__
598 if (CODE64(s)) {
599 addr = s->regs1.esp;
600 if (dflag) {
601 addr -= 8;
602 stq(s, addr, val);
603 } else {
604 addr -= 2;
605 stw(s, addr, val);
607 s->regs1.esp = addr;
608 } else
609 #endif
611 sp_mask = get_sp_mask(s->cpu_state.segs[R_SS].flags);
612 if (dflag) {
613 sp = (s->regs1.esp - 4) & sp_mask;
614 addr = sp + s->cpu_state.segs[R_SS].base;
615 stl(s, addr, val);
616 } else {
617 sp = (s->regs1.esp - 2) & sp_mask;
618 addr = sp + s->cpu_state.segs[R_SS].base;
619 stw(s, addr, val);
621 s->regs1.esp = sp | (s->regs1.esp & ~sp_mask);
625 static inline void stack_push(struct kqemu_state *s, unsigned long val)
627 stack_pushS(s, val, s->dflag);
630 static inline int get_jcc_cond(unsigned long eflags, int b)
632 switch(b) {
633 case 0x0:
634 return eflags & CC_O;
635 case 0x1:
636 return (eflags ^ CC_O) & CC_O;
637 case 0x2:
638 return eflags & CC_C;
639 case 0x3:
640 return (eflags ^ CC_C) & CC_C;
641 case 0x4:
642 return eflags & CC_Z;
643 case 0x5:
644 return (eflags ^ CC_Z) & CC_Z;
645 case 0x6:
646 return ((eflags >> 6) | eflags) & 1;
647 case 0x7:
648 return (((eflags >> 6) | eflags) & 1) ^ 1;
649 case 0x8:
650 return eflags & CC_S;
651 case 0x9:
652 return (eflags ^ CC_S) & CC_S;
653 case 0xa:
654 return eflags & CC_P;
655 case 0xb:
656 return (eflags ^ CC_P) & CC_P;
657 case 0xc:
658 return ((eflags >> 4) ^ eflags) & CC_S;
659 case 0xd:
660 return (((eflags >> 4) ^ eflags) ^ CC_S) & CC_S;
661 case 0xe:
662 return (((eflags >> 4) ^ eflags) | (eflags << 1)) & CC_S;
663 default:
664 case 0xf:
665 return ((((eflags >> 4) ^ eflags) | (eflags << 1)) ^ CC_S) & CC_S;
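/* Evaluates Jcc condition code 'b' (0x0-0xf) directly on an EFLAGS image:
   e.g. 0x6 (BE) ORs CF with ZF shifted down from bit 6, and 0xc (L) XORs SF
   with OF shifted down by 4 so that both land on bit 7; odd codes are the
   negations of the even ones. */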
669 static inline unsigned long compute_eflags(struct kqemu_state *s)
671 return (s->comm_page.virt_eflags & EFLAGS_MASK) |
672 (s->regs1.eflags & ~EFLAGS_MASK);
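/* Guest EFLAGS are kept split: the bits covered by EFLAGS_MASK (the
   virtualized ones such as IF and IOPL) live in comm_page.virt_eflags,
   the rest in regs1.eflags; compute_eflags() above merges the two views
   and set_eflags() below splits them again. */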
675 static inline void set_eflags(struct kqemu_state *s, unsigned long val)
677 s->comm_page.virt_eflags = val & EFLAGS_MASK;
678 s->regs1.eflags = compute_eflags_user(s, val);
681 static inline void load_eflags(struct kqemu_state *s,
682 unsigned long val, unsigned long update_mask)
684 unsigned long org_eflags;
686 update_mask |= 0xcff; /* DF + all condition codes */
687 org_eflags = compute_eflags(s);
688 val = (val & update_mask) | (org_eflags & ~update_mask);
689 set_eflags(s, val);
692 static inline void set_reset_eflags(struct kqemu_state *s,
693 unsigned long set_val,
694 unsigned long reset_val)
696 unsigned long val;
697 val = compute_eflags(s);
698 val = (val | set_val) & ~reset_val;
699 set_eflags(s, val);
702 static inline int get_eflags_iopl(struct kqemu_state *s)
704 return (s->comm_page.virt_eflags >> IOPL_SHIFT) & 3;
707 /* return IF_MASK or 0 */
708 static inline int get_eflags_if(struct kqemu_state *s)
710 return (s->comm_page.virt_eflags & IF_MASK);
713 /* return VM_MASK or 0 */
714 static inline int get_eflags_vm(struct kqemu_state *s)
716 return 0; /* currently VM_MASK cannot be set */
719 /* return NT_MASK or 0 */
720 static inline int get_eflags_nt(struct kqemu_state *s)
722 return s->regs1.eflags & NT_MASK;
725 static void cpu_x86_set_cpl(struct kqemu_state *s, int cpl)
727 int is_user;
729 #ifdef USE_SEG_GP
730 /* update GDT/LDT cache for cpl == 3 because GDT and LDT could
731 have been modified by guest kernel code */
732 if (cpl == 3)
733 update_gdt_ldt_cache(s);
734 #endif
736 /* switch the address space */
737 is_user = (cpl == 3);
738 s->monitor_cr3 = s->pgds_cr3[is_user];
739 asm volatile ("mov %0, %%cr3" : : "r" (s->monitor_cr3));
741 s->cpu_state.cpl = cpl;
743 /* just needed for AM bit */
744 update_host_cr0(s);
746 /* may be needed for TSD */
747 update_host_cr4(s);
749 update_seg_desc_caches(s);
751 /* switch the GDT and the LDT */
752 #ifdef USE_SEG_GP
753 s->monitor_gdt.base = s->monitor_data_vaddr +
754 offsetof(struct kqemu_state, dt_table) + 0x20000 * is_user;
755 #else
756 s->monitor_gdt.base = s->monitor_data_vaddr +
757 offsetof(struct kqemu_state, dt_table) + 0x20000 * cpl;
758 #endif
759 /* XXX: check op size for x86_64 */
760 asm volatile ("lgdt %0" : "=m" (s->monitor_gdt));
761 asm volatile ("lldt %0" : "=m" (s->monitor_ldt_sel));
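/* Changing the guest CPL switches the monitor's whole world: CR3 is
   repointed at the per-privilege shadow page table (pgds_cr3[is_user]),
   host CR0/CR4 are refreshed, the segment caches are rebuilt and the GDT
   is switched to the matching copy of dt_table before reloading GDTR and
   the LDT selector. */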
764 /* load a segment descriptor */
765 static void load_seg_desc(struct kqemu_state *s,
766 int seg_reg, uint16_t selector)
768 struct kqemu_cpu_state *env = &s->cpu_state;
769 int index;
770 unsigned long ptr;
771 struct kqemu_segment_cache *dt;
772 uint32_t e1, e2;
773 int cpl, dpl, rpl;
775 #ifdef DEBUG_SEG
776 monitor_log(s, "load_seg_desc: reg=%d sel=0x%04x\n", seg_reg, selector);
777 #endif
778 if (selector >= s->monitor_selector_base &&
779 selector <= (s->monitor_selector_base + MONITOR_SEL_RANGE)) {
780 monitor_panic(s, "Trying to load a reserved selector\n");
783 if ((selector & 0xfffc) == 0) {
784 if (seg_reg == R_SS
785 #ifdef __x86_64__
786 && (!(env->segs[R_CS].flags & DESC_L_MASK) || env->cpl == 3)
787 #endif
789 raise_exception_err(s, EXCP0D_GPF, 0);
790 cpu_x86_load_seg_cache(s, seg_reg, selector, 0, 0, 0, 0);
791 } else {
792 if (selector & 0x4)
793 dt = &env->ldt;
794 else
795 dt = &env->gdt;
796 index = selector & ~7;
797 if ((index + 7) > dt->limit)
798 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
799 ptr = dt->base + index;
800 e1 = ldl_kernel(ptr);
801 e2 = ldl_kernel(ptr + 4);
803 if (!(e2 & DESC_S_MASK))
804 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
805 rpl = selector & 3;
806 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
807 cpl = env->cpl;
808 if (seg_reg == R_SS) {
809 /* must be writable segment */
810 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
811 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
812 if (rpl != cpl || dpl != cpl)
813 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
814 } else if (seg_reg == R_CS) {
815 if (!(e2 & DESC_CS_MASK))
816 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
817 if (e2 & DESC_C_MASK) {
818 if (dpl > rpl)
819 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
820 } else {
821 if (dpl != rpl)
822 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
824 } else {
825 /* must be readable segment */
826 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
827 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
829 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
830 /* if not conforming code, test rights */
831 if (dpl < cpl || dpl < rpl)
832 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
836 if (!(e2 & DESC_P_MASK)) {
837 if (seg_reg == R_SS)
838 raise_exception_err(s, EXCP0C_STACK, selector & 0xfffc);
839 else
840 raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
843 #if 0
844 /* set the access bit if not already set */
845 if (!(e2 & DESC_A_MASK)) {
846 e2 |= DESC_A_MASK;
847 stl_kernel(ptr + 4, e2);
849 #endif
850 #ifdef __x86_64__
851 /* reset the long mode bit if we are in legacy mode */
852 if (seg_reg == R_CS && !(env->efer & MSR_EFER_LMA))
853 e2 &= ~DESC_L_MASK;
854 #endif
855 cpu_x86_load_seg_cache(s, seg_reg, selector, get_seg_base(e1, e2),
856 get_seg_limit(e1, e2), e1, e2);
860 /* return non-zero on error */
861 static inline int load_segment(struct kqemu_state *s,
862 uint32_t *e1_ptr, uint32_t *e2_ptr,
863 int selector)
865 struct kqemu_cpu_state *env = &s->cpu_state;
866 struct kqemu_segment_cache *dt;
867 int index;
868 unsigned long ptr;
870 if (selector & 0x4)
871 dt = &env->ldt;
872 else
873 dt = &env->gdt;
874 index = selector & ~7;
875 if ((index + 7) > dt->limit)
876 return -1;
877 ptr = dt->base + index;
878 *e1_ptr = ldl_kernel(ptr);
879 *e2_ptr = ldl_kernel(ptr + 4);
880 return 0;
883 static inline void get_ss_esp_from_tss(struct kqemu_state *s,
884 uint32_t *ss_ptr,
885 uint32_t *esp_ptr, int dpl)
887 struct kqemu_cpu_state *env = &s->cpu_state;
888 int type, index, shift;
889 #if 0
890 if (!(env->tr.flags & DESC_P_MASK))
891 cpu_abort(env, "invalid tss");
892 #endif
893 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
894 #if 0
895 if ((type & 7) != 1)
896 cpu_abort(env, "invalid tss type");
897 #endif
898 shift = type >> 3;
899 index = (dpl * 4 + 2) << shift;
900 if (index + (4 << shift) - 1 > env->tr.limit)
901 raise_exception_err(s, EXCP0A_TSS, env->tr.selector & 0xfffc);
902 if (shift == 0) {
903 *esp_ptr = lduw_kernel(env->tr.base + index);
904 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
905 } else {
906 *esp_ptr = ldl_kernel(env->tr.base + index);
907 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
911 /* protected mode interrupt */
912 static void do_interrupt_protected(struct kqemu_state *s,
913 int intno, int is_int, int error_code,
914 unsigned int next_eip, int is_hw)
916 struct kqemu_cpu_state *env = &s->cpu_state;
917 struct kqemu_segment_cache *dt;
918 unsigned long ptr, ssp;
919 int type, dpl, selector, ss_dpl, cpl, sp_mask;
920 int has_error_code, new_stack, shift;
921 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
922 uint32_t old_eip;
924 has_error_code = 0;
925 if (!is_int && !is_hw) {
926 switch(intno) {
927 case 8:
928 case 10:
929 case 11:
930 case 12:
931 case 13:
932 case 14:
933 case 17:
934 has_error_code = 1;
935 break;
938 if (is_int)
939 old_eip = next_eip;
940 else
941 old_eip = EIP;
943 dt = &env->idt;
944 if (intno * 8 + 7 > dt->limit)
945 raise_exception_err(s, EXCP0D_GPF, intno * 8 + 2);
946 ptr = dt->base + intno * 8;
947 e1 = ldl_kernel(ptr);
948 e2 = ldl_kernel(ptr + 4);
949 /* check gate type */
950 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
951 switch(type) {
952 case 5: /* task gate */
953 raise_exception(s, KQEMU_RET_SOFTMMU);
954 return;
955 case 6: /* 286 interrupt gate */
956 case 7: /* 286 trap gate */
957 case 14: /* 386 interrupt gate */
958 case 15: /* 386 trap gate */
959 break;
960 default:
961 raise_exception_err(s, EXCP0D_GPF, intno * 8 + 2);
962 break;
964 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
965 cpl = env->cpl;
966 /* check privilege if software int */
967 if (is_int && dpl < cpl)
968 raise_exception_err(s, EXCP0D_GPF, intno * 8 + 2);
969 /* check valid bit */
970 if (!(e2 & DESC_P_MASK))
971 raise_exception_err(s, EXCP0B_NOSEG, intno * 8 + 2);
972 selector = e1 >> 16;
973 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
974 if ((selector & 0xfffc) == 0)
975 raise_exception_err(s, EXCP0D_GPF, 0);
977 if (load_segment(s, &e1, &e2, selector) != 0)
978 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
979 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
980 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
981 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
982 if (dpl > cpl)
983 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
984 if (!(e2 & DESC_P_MASK))
985 raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
986 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
987 /* to inner privilege */
988 get_ss_esp_from_tss(s, &ss, &esp, dpl);
989 if ((ss & 0xfffc) == 0)
990 raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
991 if ((ss & 3) != dpl)
992 raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
993 if (load_segment(s, &ss_e1, &ss_e2, ss) != 0)
994 raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
995 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
996 if (ss_dpl != dpl)
997 raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
998 if (!(ss_e2 & DESC_S_MASK) ||
999 (ss_e2 & DESC_CS_MASK) ||
1000 !(ss_e2 & DESC_W_MASK))
1001 raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
1002 if (!(ss_e2 & DESC_P_MASK))
1003 raise_exception_err(s, EXCP0A_TSS, ss & 0xfffc);
1004 new_stack = 1;
1005 sp_mask = get_sp_mask(ss_e2);
1006 ssp = get_seg_base(ss_e1, ss_e2);
1007 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1008 /* to same privilege */
1009 if (get_eflags_vm(s))
1010 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1011 new_stack = 0;
1012 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1013 ssp = env->segs[R_SS].base;
1014 esp = ESP;
1015 dpl = cpl;
1016 ss_e1 = ss_e2 = ss = 0; /* avoid warning */
1017 } else {
1018 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1019 new_stack = 0; /* avoid warning */
1020 sp_mask = 0; /* avoid warning */
1021 ssp = 0; /* avoid warning */
1022 esp = 0; /* avoid warning */
1025 shift = type >> 3;
1027 #if 0
1028 /* XXX: check that enough room is available */
1029 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1030 if (env->eflags & VM_MASK)
1031 push_size += 8;
1032 push_size <<= shift;
1033 #endif
1034 if (shift == 1) {
1035 if (new_stack) {
1036 if (get_eflags_vm(s)) {
1037 PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_GS));
1038 PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_FS));
1039 PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_DS));
1040 PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_ES));
1042 PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_SS));
1043 PUSHL(ssp, esp, sp_mask, ESP);
1045 PUSHL(ssp, esp, sp_mask, compute_eflags(s));
1046 PUSHL(ssp, esp, sp_mask, get_seg_sel(s, R_CS));
1047 PUSHL(ssp, esp, sp_mask, old_eip);
1048 if (has_error_code) {
1049 PUSHL(ssp, esp, sp_mask, error_code);
1051 } else {
1052 if (new_stack) {
1053 if (get_eflags_vm(s)) {
1054 PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_GS));
1055 PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_FS));
1056 PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_DS));
1057 PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_ES));
1059 PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_SS));
1060 PUSHW(ssp, esp, sp_mask, ESP);
1062 PUSHW(ssp, esp, sp_mask, compute_eflags(s));
1063 PUSHW(ssp, esp, sp_mask, get_seg_sel(s, R_CS));
1064 PUSHW(ssp, esp, sp_mask, old_eip);
1065 if (has_error_code) {
1066 PUSHW(ssp, esp, sp_mask, error_code);
1070 cpu_x86_set_cpl(s, dpl);
1071 if (new_stack) {
1072 if (get_eflags_vm(s)) {
1073 cpu_x86_load_seg_cache(s, R_ES, 0, 0, 0, 0, 0);
1074 cpu_x86_load_seg_cache(s, R_DS, 0, 0, 0, 0, 0);
1075 cpu_x86_load_seg_cache(s, R_FS, 0, 0, 0, 0, 0);
1076 cpu_x86_load_seg_cache(s, R_GS, 0, 0, 0, 0, 0);
1078 ss = (ss & ~3) | dpl;
1079 cpu_x86_load_seg_cache(s, R_SS, ss,
1080 ssp, get_seg_limit(ss_e1, ss_e2), ss_e1, ss_e2);
1082 ESP = (ESP & ~sp_mask) | (esp & sp_mask);
1084 selector = (selector & ~3) | dpl;
1085 cpu_x86_load_seg_cache(s, R_CS, selector,
1086 get_seg_base(e1, e2),
1087 get_seg_limit(e1, e2),
1088 e1, e2);
1089 EIP = offset;
1091 /* interrupt gates clear the IF mask */
1092 if ((type & 1) == 0) {
1093 set_reset_eflags(s, 0, IF_MASK);
1095 set_reset_eflags(s, 0, VM_MASK | RF_MASK | TF_MASK | NT_MASK);
1098 #ifdef __x86_64__
1100 static inline unsigned long get_rsp_from_tss(struct kqemu_state *s, int level)
1102 struct kqemu_cpu_state *env = &s->cpu_state;
1103 int index;
1105 #if 0
1106 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1107 env->tr.base, env->tr.limit);
1108 #endif
1110 #if 0
1111 if (!(env->tr.flags & DESC_P_MASK))
1112 cpu_abort(env, "invalid tss");
1113 #endif
1114 index = 8 * level + 4;
1115 if ((index + 7) > env->tr.limit)
1116 raise_exception_err(s, EXCP0A_TSS, env->tr.selector & 0xfffc);
1117 return ldq_kernel(env->tr.base + index);
1120 /* 64 bit interrupt */
1121 static void do_interrupt64(struct kqemu_state *s,
1122 int intno, int is_int, int error_code,
1123 unsigned long next_eip, int is_hw)
1125 struct kqemu_cpu_state *env = &s->cpu_state;
1126 struct kqemu_segment_cache *dt;
1127 unsigned long ptr;
1128 int type, dpl, selector, cpl, ist;
1129 int has_error_code, new_stack;
1130 uint32_t e1, e2, e3, ss;
1131 unsigned long old_eip, esp, offset;
1133 has_error_code = 0;
1134 if (!is_int && !is_hw) {
1135 switch(intno) {
1136 case 8:
1137 case 10:
1138 case 11:
1139 case 12:
1140 case 13:
1141 case 14:
1142 case 17:
1143 has_error_code = 1;
1144 break;
1147 if (is_int)
1148 old_eip = next_eip;
1149 else
1150 old_eip = EIP;
1152 dt = &env->idt;
1153 if (intno * 16 + 15 > dt->limit)
1154 raise_exception_err(s, EXCP0D_GPF, intno * 16 + 2);
1155 ptr = dt->base + intno * 16;
1156 e1 = ldl_kernel(ptr);
1157 e2 = ldl_kernel(ptr + 4);
1158 e3 = ldl_kernel(ptr + 8);
1159 /* check gate type */
1160 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1161 switch(type) {
1162 case 14: /* 386 interrupt gate */
1163 case 15: /* 386 trap gate */
1164 break;
1165 default:
1166 raise_exception_err(s, EXCP0D_GPF, intno * 16 + 2);
1167 break;
1169 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1170 cpl = env->cpl;
1171 /* check privilege if software int */
1172 if (is_int && dpl < cpl)
1173 raise_exception_err(s, EXCP0D_GPF, intno * 16 + 2);
1174 /* check valid bit */
1175 if (!(e2 & DESC_P_MASK))
1176 raise_exception_err(s, EXCP0B_NOSEG, intno * 16 + 2);
1177 selector = e1 >> 16;
1178 offset = ((unsigned long)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1179 ist = e2 & 7;
1180 if ((selector & 0xfffc) == 0)
1181 raise_exception_err(s, EXCP0D_GPF, 0);
1183 if (load_segment(s, &e1, &e2, selector) != 0)
1184 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1185 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1186 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1187 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1188 if (dpl > cpl)
1189 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1190 if (!(e2 & DESC_P_MASK))
1191 raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
1192 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1193 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1194 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1195 /* to inner privilege */
1196 if (ist != 0)
1197 esp = get_rsp_from_tss(s, ist + 3);
1198 else
1199 esp = get_rsp_from_tss(s, dpl);
1200 esp &= ~0xfLL; /* align stack */
1201 ss = 0;
1202 new_stack = 1;
1203 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1204 /* to same privilege */
1205 if (env->eflags & VM_MASK)
1206 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1207 new_stack = 0;
1208 if (ist != 0)
1209 esp = get_rsp_from_tss(s, ist + 3);
1210 else
1211 esp = ESP;
1212 esp &= ~0xfLL; /* align stack */
1213 dpl = cpl;
1214 } else {
1215 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1216 new_stack = 0; /* avoid warning */
1217 esp = 0; /* avoid warning */
1220 PUSHQ(esp, get_seg_sel(s, R_SS));
1221 PUSHQ(esp, ESP);
1222 PUSHQ(esp, compute_eflags(s));
1223 PUSHQ(esp, get_seg_sel(s, R_CS));
1224 PUSHQ(esp, old_eip);
1225 if (has_error_code) {
1226 PUSHQ(esp, error_code);
1229 cpu_x86_set_cpl(s, dpl);
1230 if (new_stack) {
1231 ss = 0 | dpl;
1232 cpu_x86_load_seg_cache(s, R_SS, ss, 0, 0, 0, 0);
1234 ESP = esp;
1236 selector = (selector & ~3) | dpl;
1237 cpu_x86_load_seg_cache(s, R_CS, selector,
1238 get_seg_base(e1, e2),
1239 get_seg_limit(e1, e2),
1240 e1, e2);
1241 EIP = offset;
1243 /* interrupt gates clear the IF mask */
1244 if ((type & 1) == 0) {
1245 set_reset_eflags(s, 0, IF_MASK);
1247 set_reset_eflags(s, 0, VM_MASK | RF_MASK | TF_MASK | NT_MASK);
1249 #endif
1251 static void do_interrupt(struct kqemu_state *s,
1252 int intno, int is_int, int error_code,
1253 unsigned long next_eip, int is_hw)
1255 #ifdef __x86_64__
1256 if (s->cpu_state.efer & MSR_EFER_LMA) {
1257 do_interrupt64(s, intno, is_int, error_code, next_eip, is_hw);
1258 } else
1259 #endif
1261 do_interrupt_protected(s, intno, is_int, error_code, next_eip, is_hw);
1265 static inline void validate_seg(struct kqemu_state *s, int seg_reg, int cpl)
1267 int dpl;
1268 uint32_t e2;
1270 e2 = s->cpu_state.segs[seg_reg].flags;
1271 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1272 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1273 /* data or non conforming code segment */
1274 if (dpl < cpl) {
1275 cpu_x86_load_seg_cache(s, seg_reg, 0, 0, 0, 0, 0);
1280 /* protected mode iret */
1281 static inline void helper_ret_protected(struct kqemu_state *s,
1282 int shift, int is_iret, int addend)
1284 struct kqemu_cpu_state *env = &s->cpu_state;
1285 uint32_t new_cs, new_eflags, new_ss;
1286 uint32_t e1, e2, ss_e1, ss_e2;
1287 int cpl, dpl, rpl, eflags_mask, iopl;
1288 unsigned long ssp, sp, new_eip, new_esp, sp_mask;
1290 #ifdef __x86_64__
1291 if (shift == 2)
1292 sp_mask = -1;
1293 else
1294 #endif
1295 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1296 sp = ESP;
1297 /* XXX: ssp is zero in 64 bit ? */
1298 ssp = env->segs[R_SS].base;
1299 new_eflags = 0; /* avoid warning */
1300 #ifdef __x86_64__
1301 if (shift == 2) {
1302 POPQ(sp, new_eip);
1303 POPQ(sp, new_cs);
1304 new_cs &= 0xffff;
1305 if (is_iret) {
1306 POPQ(sp, new_eflags);
1308 } else
1309 #endif
1310 if (shift == 1) {
1311 /* 32 bits */
1312 POPL(ssp, sp, sp_mask, new_eip);
1313 POPL(ssp, sp, sp_mask, new_cs);
1314 new_cs &= 0xffff;
1315 if (is_iret) {
1316 POPL(ssp, sp, sp_mask, new_eflags);
1317 if (new_eflags & VM_MASK)
1318 goto return_to_vm86;
1320 } else {
1321 /* 16 bits */
1322 POPW(ssp, sp, sp_mask, new_eip);
1323 POPW(ssp, sp, sp_mask, new_cs);
1324 if (is_iret)
1325 POPW(ssp, sp, sp_mask, new_eflags);
1327 #ifdef DEBUG_LRET
1328 monitor_log(s, "lret new %04x:" FMT_lx " s=%d addend=0x%x\n",
1329 new_cs, new_eip, shift, addend);
1330 #endif
1331 if ((new_cs & 0xfffc) == 0)
1332 raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
1333 if (load_segment(s, &e1, &e2, new_cs) != 0)
1334 raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
1335 if (!(e2 & DESC_S_MASK) ||
1336 !(e2 & DESC_CS_MASK))
1337 raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
1338 cpl = env->cpl;
1339 rpl = new_cs & 3;
1340 if (rpl < cpl)
1341 raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
1342 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1343 if (e2 & DESC_C_MASK) {
1344 if (dpl > rpl)
1345 raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
1346 } else {
1347 if (dpl != rpl)
1348 raise_exception_err(s, EXCP0D_GPF, new_cs & 0xfffc);
1350 if (!(e2 & DESC_P_MASK))
1351 raise_exception_err(s, EXCP0B_NOSEG, new_cs & 0xfffc);
1353 sp += addend;
1354 if (rpl == cpl && (!CODE64(s) ||
1355 (CODE64(s) && !is_iret))) {
1356 /* return to same privilege level */
1357 cpu_x86_load_seg_cache(s, R_CS, new_cs,
1358 get_seg_base(e1, e2),
1359 get_seg_limit(e1, e2),
1360 e1, e2);
1361 } else {
1362 /* return to different privilege level */
1363 #ifdef __x86_64__
1364 if (shift == 2) {
1365 POPQ(sp, new_esp);
1366 POPQ(sp, new_ss);
1367 new_ss &= 0xffff;
1368 } else
1369 #endif
1370 if (shift == 1) {
1371 /* 32 bits */
1372 POPL(ssp, sp, sp_mask, new_esp);
1373 POPL(ssp, sp, sp_mask, new_ss);
1374 new_ss &= 0xffff;
1375 } else {
1376 /* 16 bits */
1377 POPW(ssp, sp, sp_mask, new_esp);
1378 POPW(ssp, sp, sp_mask, new_ss);
1380 #ifdef DEBUG_PCALL
1381 if (loglevel & CPU_LOG_PCALL) {
1382 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
1383 new_ss, new_esp);
1385 #endif
1386 if ((new_ss & 0xfffc) == 0) {
1387 #ifdef __x86_64__
1388 /* NULL ss is allowed in long mode if cpl != 3 */
1389 if ((env->efer & MSR_EFER_LMA) && rpl != 3) {
1390 cpu_x86_set_cpl(s, rpl);
1391 cpu_x86_load_seg_cache(s, R_SS, new_ss,
1392 0, 0xffffffff,
1393 0xffff,
1394 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1395 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
1396 DESC_W_MASK | DESC_A_MASK | 0x000f0000);
1397 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
1398 } else
1399 #endif
1401 raise_exception_err(s, EXCP0D_GPF, 0);
1403 } else {
1404 if ((new_ss & 3) != rpl)
1405 raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
1406 if (load_segment(s, &ss_e1, &ss_e2, new_ss) != 0)
1407 raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
1408 if (!(ss_e2 & DESC_S_MASK) ||
1409 (ss_e2 & DESC_CS_MASK) ||
1410 !(ss_e2 & DESC_W_MASK))
1411 raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
1412 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1413 if (dpl != rpl)
1414 raise_exception_err(s, EXCP0D_GPF, new_ss & 0xfffc);
1415 if (!(ss_e2 & DESC_P_MASK))
1416 raise_exception_err(s, EXCP0B_NOSEG, new_ss & 0xfffc);
1417 cpu_x86_set_cpl(s, rpl);
1418 cpu_x86_load_seg_cache(s, R_SS, new_ss,
1419 get_seg_base(ss_e1, ss_e2),
1420 get_seg_limit(ss_e1, ss_e2),
1421 ss_e1, ss_e2);
1424 cpu_x86_load_seg_cache(s, R_CS, new_cs,
1425 get_seg_base(e1, e2),
1426 get_seg_limit(e1, e2),
1427 e1, e2);
1428 sp = new_esp;
1429 #ifdef __x86_64__
1430 if (shift == 2)
1431 sp_mask = -1;
1432 else
1433 #endif
1434 sp_mask = get_sp_mask(ss_e2);
1436 /* validate data segments */
1437 validate_seg(s, R_ES, cpl);
1438 validate_seg(s, R_DS, cpl);
1439 validate_seg(s, R_FS, cpl);
1440 validate_seg(s, R_GS, cpl);
1442 sp += addend;
1444 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
1445 EIP = new_eip;
1446 if (is_iret) {
1447 /* NOTE: 'cpl' is the _old_ CPL */
1448 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
1449 if (cpl == 0)
1450 eflags_mask |= IOPL_MASK;
1451 iopl = get_eflags_iopl(s);
1452 if (cpl <= iopl)
1453 eflags_mask |= IF_MASK;
1454 if (shift == 0)
1455 eflags_mask &= 0xffff;
1456 load_eflags(s, new_eflags, eflags_mask);
1458 return;
1460 return_to_vm86:
1461 raise_exception(s, KQEMU_RET_SOFTMMU);
1464 void helper_iret_protected(struct kqemu_state *s, int shift)
1466 /* specific case for TSS */
1467 if (get_eflags_nt(s)) {
1468 #ifdef __x86_64__
1469 if (s->cpu_state.efer & MSR_EFER_LMA)
1470 raise_exception_err(s, EXCP0D_GPF, 0);
1471 #endif
1472 raise_exception(s, KQEMU_RET_SOFTMMU);
1473 } else {
1474 helper_ret_protected(s, shift, 1, 0);
1478 void helper_lret_protected(struct kqemu_state *s, int shift, int addend)
1480 helper_ret_protected(s, shift, 0, addend);
1483 void do_int(struct kqemu_state *s, int intno)
1485 unsigned long next_eip;
1486 next_eip = pc;
1487 if (s->cpu_state.user_only) {
1488 s->cpu_state.next_eip = next_eip;
1489 raise_exception(s, KQEMU_RET_INT + intno);
1490 } else {
1491 do_interrupt(s, intno, 1, 0, next_eip, 0);
1495 static void helper_syscall(struct kqemu_state *s)
1497 struct kqemu_cpu_state *env = &s->cpu_state;
1498 int selector;
1500 if (!(env->efer & MSR_EFER_SCE)) {
1501 raise_exception_err(s, EXCP06_ILLOP, 0);
1503 if (env->user_only) {
1504 env->next_eip = pc;
1505 raise_exception(s, KQEMU_RET_SYSCALL);
1508 selector = (env->star >> 32) & 0xffff;
1509 #ifdef __x86_64__
1510 if (env->efer & MSR_EFER_LMA) {
1511 int code64;
1513 s->regs1.ecx = pc;
1514 s->regs1.r11 = compute_eflags(s);
1516 code64 = CODE64(s);
1518 cpu_x86_set_cpl(s, 0);
1519 cpu_x86_load_seg_cache(s, R_CS, selector & 0xfffc,
1520 0, 0xffffffff, 0,
1521 DESC_G_MASK | DESC_P_MASK |
1522 DESC_S_MASK |
1523 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1524 cpu_x86_load_seg_cache(s, R_SS, (selector + 8) & 0xfffc,
1525 0, 0xffffffff, 0,
1526 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1527 DESC_S_MASK |
1528 DESC_W_MASK | DESC_A_MASK);
1529 set_reset_eflags(s, 0, env->fmask);
1530 if (code64)
1531 EIP = env->lstar;
1532 else
1533 EIP = env->cstar;
1534 } else
1535 #endif
1537 s->regs1.ecx = (uint32_t)(pc);
1539 cpu_x86_set_cpl(s, 0);
1540 cpu_x86_load_seg_cache(s, R_CS, selector & 0xfffc,
1541 0, 0xffffffff, 0,
1542 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1543 DESC_S_MASK |
1544 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1545 cpu_x86_load_seg_cache(s, R_SS, (selector + 8) & 0xfffc,
1546 0, 0xffffffff, 0,
1547 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1548 DESC_S_MASK |
1549 DESC_W_MASK | DESC_A_MASK);
1550 set_reset_eflags(s, 0, IF_MASK | RF_MASK | VM_MASK);
1551 EIP = (uint32_t)env->star;
1555 static void helper_sysret(struct kqemu_state *s)
1557 struct kqemu_cpu_state *env = &s->cpu_state;
1558 int selector;
1560 if (!(env->efer & MSR_EFER_SCE)) {
1561 raise_exception_err(s, EXCP06_ILLOP, 0);
1563 if (!(env->cr0 & CR0_PE_MASK) || env->cpl != 0) {
1564 raise_exception_err(s, EXCP0D_GPF, 0);
1566 selector = (env->star >> 48) & 0xffff;
1567 #ifdef __x86_64__
1568 if (env->efer & MSR_EFER_LMA) {
1569 cpu_x86_set_cpl(s, 3);
1570 if (s->dflag == 2) {
1571 cpu_x86_load_seg_cache(s, R_CS, (selector + 16) | 3,
1572 0, 0xffffffff, 0,
1573 DESC_G_MASK | DESC_P_MASK |
1574 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1575 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1576 DESC_L_MASK);
1577 EIP = s->regs1.ecx;
1578 } else {
1579 cpu_x86_load_seg_cache(s, R_CS, selector | 3,
1580 0, 0xffffffff, 0,
1581 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1582 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1583 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1584 EIP = (uint32_t)s->regs1.ecx;
1586 cpu_x86_load_seg_cache(s, R_SS, selector + 8,
1587 0, 0xffffffff, 0,
1588 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1589 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1590 DESC_W_MASK | DESC_A_MASK);
1591 load_eflags(s, (uint32_t)(s->regs1.r11), TF_MASK | AC_MASK | ID_MASK |
1592 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1593 } else
1594 #endif
1596 cpu_x86_set_cpl(s, 3);
1597 cpu_x86_load_seg_cache(s, R_CS, selector | 3,
1598 0, 0xffffffff, 0,
1599 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1600 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1601 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1602 EIP = (uint32_t)s->regs1.ecx;
1603 cpu_x86_load_seg_cache(s, R_SS, selector + 8,
1604 0, 0xffffffff, 0,
1605 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1606 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1607 DESC_W_MASK | DESC_A_MASK);
1608 set_reset_eflags(s, IF_MASK, 0);
1612 static void helper_sysenter(struct kqemu_state *s)
1614 struct kqemu_cpu_state *env = &s->cpu_state;
1616 if (env->user_only)
1617 raise_exception(s, KQEMU_RET_SOFTMMU);
1618 if (env->sysenter_cs == 0) {
1619 raise_exception_err(s, EXCP0D_GPF, 0);
1621 set_reset_eflags(s, 0, VM_MASK | IF_MASK | RF_MASK);
1622 cpu_x86_set_cpl(s, 0);
1623 cpu_x86_load_seg_cache(s, R_CS, env->sysenter_cs & 0xfffc,
1624 0, 0xffffffff,
1626 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1627 DESC_S_MASK |
1628 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1629 cpu_x86_load_seg_cache(s, R_SS, (env->sysenter_cs + 8) & 0xfffc,
1630 0, 0xffffffff,
1632 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1633 DESC_S_MASK |
1634 DESC_W_MASK | DESC_A_MASK);
1635 ESP = env->sysenter_esp;
1636 EIP = env->sysenter_eip;
1639 static void helper_sysexit(struct kqemu_state *s)
1641 struct kqemu_cpu_state *env = &s->cpu_state;
1642 int cpl;
1644 cpl = env->cpl;
1645 if (env->sysenter_cs == 0 || cpl != 0) {
1646 raise_exception_err(s, EXCP0D_GPF, 0);
1648 cpu_x86_set_cpl(s, 3);
1649 cpu_x86_load_seg_cache(s, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
1650 0, 0xffffffff,
1652 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1653 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1654 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1655 cpu_x86_load_seg_cache(s, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
1656 0, 0xffffffff,
1658 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1659 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1660 DESC_W_MASK | DESC_A_MASK);
1661 ESP = s->regs1.ecx;
1662 EIP = s->regs1.edx;
1665 static inline void load_seg_cache_raw_dt(struct kqemu_segment_cache *sc,
1666 uint32_t e1, uint32_t e2)
1668 sc->base = get_seg_base(e1, e2);
1669 sc->limit = get_seg_limit(e1, e2);
1670 sc->flags = e2;
1673 void helper_lldt(struct kqemu_state *s, int selector)
1675 struct kqemu_cpu_state *env = &s->cpu_state;
1676 struct kqemu_segment_cache *dt;
1677 uint32_t e1, e2;
1678 int index, entry_limit;
1679 unsigned long ptr;
1681 if ((selector & 0xfffc) == 0) {
1682 /* XXX: NULL selector case: invalid LDT */
1683 env->ldt.base = 0;
1684 env->ldt.limit = 0;
1685 } else {
1686 if (selector & 0x4)
1687 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1688 dt = &env->gdt;
1689 index = selector & ~7;
1690 #ifdef __x86_64__
1691 if (env->efer & MSR_EFER_LMA)
1692 entry_limit = 15;
1693 else
1694 #endif
1695 entry_limit = 7;
1696 if ((index + entry_limit) > dt->limit)
1697 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1698 ptr = dt->base + index;
1699 e1 = ldl_kernel(ptr);
1700 e2 = ldl_kernel(ptr + 4);
1701 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1702 raise_exception_err(s, EXCP0D_GPF, selector & 0xfffc);
1703 if (!(e2 & DESC_P_MASK))
1704 raise_exception_err(s, EXCP0B_NOSEG, selector & 0xfffc);
1705 #ifdef __x86_64__
1706 if (env->efer & MSR_EFER_LMA) {
1707 uint32_t e3;
1708 e3 = ldl_kernel(ptr + 8);
1709 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1710 env->ldt.base |= (unsigned long)e3 << 32;
1711 } else
1712 #endif
1714 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1717 env->ldt.selector = selector;
1720 static void helper_wrmsr(struct kqemu_state *s)
1722 #ifdef __x86_64__
1723 struct kqemu_cpu_state *env = &s->cpu_state;
1724 #endif
1725 uint64_t val;
1727 val = ((uint32_t)s->regs1.eax) |
1728 ((uint64_t)((uint32_t)s->regs1.edx) << 32);
1730 switch((uint32_t)s->regs1.ecx) {
1731 #ifdef __x86_64__
1732 case MSR_FSBASE:
1733 env->segs[R_FS].base = val;
1734 wrmsrl(MSR_FSBASE, val);
1735 break;
1736 case MSR_GSBASE:
1737 env->segs[R_GS].base = val;
1738 wrmsrl(MSR_GSBASE, val);
1739 break;
1740 case MSR_KERNELGSBASE:
1741 env->kernelgsbase = val;
1742 break;
1743 #endif
1744 default:
1745 raise_exception(s, KQEMU_RET_SOFTMMU);
1749 static void helper_rdmsr(struct kqemu_state *s)
1751 struct kqemu_cpu_state *env = &s->cpu_state;
1752 uint64_t val;
1754 switch((uint32_t)s->regs1.ecx) {
1755 case MSR_IA32_SYSENTER_CS:
1756 val = env->sysenter_cs;
1757 break;
1758 case MSR_IA32_SYSENTER_ESP:
1759 val = env->sysenter_esp;
1760 break;
1761 case MSR_IA32_SYSENTER_EIP:
1762 val = env->sysenter_eip;
1763 break;
1764 case MSR_EFER:
1765 val = env->efer;
1766 break;
1767 case MSR_STAR:
1768 val = env->star;
1769 break;
1770 #ifdef __x86_64__
1771 case MSR_LSTAR:
1772 val = env->lstar;
1773 break;
1774 case MSR_CSTAR:
1775 val = env->cstar;
1776 break;
1777 case MSR_FMASK:
1778 val = env->fmask;
1779 break;
1780 case MSR_FSBASE:
1781 val = env->segs[R_FS].base;
1782 break;
1783 case MSR_GSBASE:
1784 val = env->segs[R_GS].base;
1785 break;
1786 case MSR_KERNELGSBASE:
1787 val = env->kernelgsbase;
1788 break;
1789 #endif
1790 default:
1791 raise_exception(s, KQEMU_RET_SOFTMMU);
1793 s->regs1.eax = (uint32_t)(val);
1794 s->regs1.edx = (uint32_t)(val >> 32);
1797 #ifdef __x86_64__
1798 static void helper_swapgs(struct kqemu_state *s)
1800 struct kqemu_cpu_state *env = &s->cpu_state;
1801 uint64_t val;
1802 val = env->kernelgsbase;
1803 env->kernelgsbase = env->segs[R_GS].base;
1804 env->segs[R_GS].base = val;
1806 wrmsrl(MSR_GSBASE, val);
1808 #endif
1810 /* XXX: optimize by reloading just the needed fields ? */
1811 static inline void reload_seg_cache2(struct kqemu_state *s, int seg_reg,
1812 unsigned int selector)
1814 struct kqemu_segment_cache *sc;
1815 uint32_t e1, e2, sel;
1816 uint8_t *ptr;
1818 sel = (selector & ~7) | ((selector & 4) << 14);
1819 ptr = (uint8_t *)s->dt_table + ((NB_DT_TABLES - 1) << 17) + sel;
1820 #ifndef USE_SEG_GP
1821 e1 = *(uint16_t *)(ptr + 2);
1822 e2 = *(uint32_t *)(ptr + 4);
1823 sc = &s->cpu_state.segs[seg_reg];
1824 /* only useful for SS and CS */
1825 if (seg_reg == R_CS || seg_reg == R_SS)
1826 sc->flags = e2;
1827 sc->base = (e1 | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1828 /* limit not needed */
1829 #else
1830 e1 = *(uint32_t *)(ptr);
1831 e2 = *(uint32_t *)(ptr + 4);
1832 sc = &s->cpu_state.segs[seg_reg];
1833 sc->flags = e2;
1834 sc->base = get_seg_base(e1, e2);
1835 sc->limit = get_seg_limit(e1, e2);
1836 #endif
1839 #ifdef USE_SEG_GP
1840 static inline void reload_seg_cache3(struct kqemu_state *s, int seg_reg,
1841 unsigned int selector)
1843 struct kqemu_segment_cache *sc;
1844 unsigned int sel1, sel;
1845 uint32_t e1, e2;
1846 uint8_t *ptr;
1848 sc = &s->cpu_state.segs[seg_reg];
1849 sel1 = selector | 3;
1850 if (sel1 != 3) {
1851 if (sel1 == s->regs1.cs_sel || sel1 == s->regs1.ss_sel) {
1852 sel = (selector & ~7) | ((selector & 4) << 14);
1853 ptr = (uint8_t *)s->dt_table + sel;
1854 e1 = *(uint32_t *)(ptr);
1855 e2 = *(uint32_t *)(ptr + 4);
1856 } else {
1857 e1 = s->seg_desc_cache[seg_reg][0];
1858 e2 = s->seg_desc_cache[seg_reg][1];
1860 sc->flags = e2;
1861 sc->base = get_seg_base(e1, e2);
1862 sc->limit = get_seg_limit(e1, e2);
1863 } else {
1864 sc->flags = 0;
1865 sc->base = 0;
1866 sc->limit = 0;
1869 #endif
1871 void update_seg_cache(struct kqemu_state *s)
1873 uint16_t sel;
1875 /* we must reload the segment caches to have all the necessary
1876 values. Another solution could be to reload them on demand */
1877 #ifdef USE_SEG_GP
1878 if (s->cpu_state.cpl != 3) {
1879 reload_seg_cache3(s, R_CS, s->regs1.cs_sel);
1880 reload_seg_cache3(s, R_SS, s->regs1.ss_sel);
1881 #ifdef __x86_64__
1882 asm volatile ("mov %%ds, %0" : "=r" (sel));
1883 #else
1884 sel = s->regs1.ds_sel;
1885 #endif
1886 reload_seg_cache3(s, R_DS, sel);
1887 #ifdef __x86_64__
1888 asm volatile ("mov %%es, %0" : "=r" (sel));
1889 #else
1890 sel = s->regs1.es_sel;
1891 #endif
1892 reload_seg_cache3(s, R_ES, sel);
1893 asm volatile ("mov %%fs, %0" : "=r" (sel));
1894 reload_seg_cache3(s, R_FS, sel);
1895 asm volatile ("mov %%gs, %0" : "=r" (sel));
1896 reload_seg_cache3(s, R_GS, sel);
1897 } else
1898 #endif /* USE_SEG_GP */
1900 reload_seg_cache2(s, R_CS, s->regs1.cs_sel);
1901 reload_seg_cache2(s, R_SS, s->regs1.ss_sel);
1902 #ifdef __x86_64__
1904 int sel;
1905 asm volatile ("mov %%ds, %0" : "=r" (sel));
1906 reload_seg_cache2(s, R_DS, sel);
1907 asm volatile ("mov %%es, %0" : "=r" (sel));
1908 reload_seg_cache2(s, R_ES, sel);
1910 #else
1911 reload_seg_cache2(s, R_DS, s->regs1.ds_sel);
1912 reload_seg_cache2(s, R_ES, s->regs1.es_sel);
1913 #endif
1914 asm volatile ("mov %%fs, %0" : "=r" (sel));
1915 reload_seg_cache2(s, R_FS, sel);
1916 asm volatile ("mov %%gs, %0" : "=r" (sel));
1917 reload_seg_cache2(s, R_GS, sel);
1919 #ifdef __x86_64__
1920 rdmsrl(MSR_FSBASE, s->cpu_state.segs[R_FS].base);
1921 rdmsrl(MSR_GSBASE, s->cpu_state.segs[R_GS].base);
1922 #endif
1923 s->seg_cache_loaded = 1;
1924 s->insn_count = MAX_INSN_COUNT;
1927 /* handle the exception in the monitor */
1928 void raise_exception_interp(void *opaque)
1930 struct kqemu_state *s = opaque;
1931 int intno = s->arg0;
1932 #ifdef PROFILE_INTERP2
1933 int64_t ti;
1934 #endif
1936 #ifdef PROFILE_INTERP2
1937 ti = getclock();
1938 #endif
1939 if (!s->seg_cache_loaded)
1940 update_seg_cache(s);
1942 /* the exception handling counts as one instruction so that we can
1943 detect exception loops */
1944 /* XXX: it would be better to detect double or triple faults */
1945 if (unlikely(--s->insn_count <= 0))
1946 raise_exception(s, KQEMU_RET_SOFTMMU);
1948 do_interrupt(s, intno, 0, s->cpu_state.error_code, 0, 0);
1950 if (!get_eflags_if(s)) {
1951 insn_interp(s);
1953 #ifdef PROFILE_INTERP2
1954 s->interp_interrupt_count++;
1955 s->interp_interrupt_cycles += (getclock() - ti);
1956 #endif
1957 goto_user(s, s->regs);
1960 #define MAX_INSN_LEN 15
1962 static inline uint32_t ldub_code(struct kqemu_state *s)
1964 uint32_t val;
1966 val = ldub_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
1967 pc++;
1968 return val;
1971 static inline uint32_t lduw_code(struct kqemu_state *s)
1973 uint32_t val;
1975 val = lduw_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
1976 pc += 2;
1977 return val;
1980 static inline uint32_t ldl_code(struct kqemu_state *s)
1982 uint32_t val;
1984 val = ldl_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
1985 pc += 4;
1986 return val;
1989 static inline uint64_t ldq_code(struct kqemu_state *s)
1991 uint64_t val;
1993 val = ldl_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
1994 val |= (uint64_t)ldl_mem_fast(s, pc + s->cpu_state.segs[R_CS].base + 4) << 32;
1995 pc += 8;
1996 return val;
1999 static unsigned long __attribute__((regparm(2))) get_modrm(struct kqemu_state *s, int modrm)
2001 unsigned long disp, addr;
2002 int base;
2003 int index;
2004 int scale;
2005 int mod, rm, code, override;
2006 static const void *modrm_table32[0x88] = {
2007 [0x00] = &&modrm32_00,
2008 [0x01] = &&modrm32_01,
2009 [0x02] = &&modrm32_02,
2010 [0x03] = &&modrm32_03,
2011 [0x04] = &&modrm32_04,
2012 [0x05] = &&modrm32_05,
2013 [0x06] = &&modrm32_06,
2014 [0x07] = &&modrm32_07,
2016 [0x40] = &&modrm32_40,
2017 [0x41] = &&modrm32_41,
2018 [0x42] = &&modrm32_42,
2019 [0x43] = &&modrm32_43,
2020 [0x44] = &&modrm32_44,
2021 [0x45] = &&modrm32_45,
2022 [0x46] = &&modrm32_46,
2023 [0x47] = &&modrm32_47,
2025 [0x80] = &&modrm32_80,
2026 [0x81] = &&modrm32_81,
2027 [0x82] = &&modrm32_82,
2028 [0x83] = &&modrm32_83,
2029 [0x84] = &&modrm32_84,
2030 [0x85] = &&modrm32_85,
2031 [0x86] = &&modrm32_86,
2032 [0x87] = &&modrm32_87,
2033 };
2035 if (likely(s->aflag)) {
2036 #if 1
2037 goto *modrm_table32[modrm & 0xc7];
2038 modrm32_44:
2039 /* sib, most common case ? */
2040 code = ldub_code(s);
2041 addr = (int8_t)ldub_code(s);
2042 do_sib:
2043 base = (code & 7) | REX_B(s);
2044 addr += get_reg(s, base);
2045 index = ((code >> 3) & 7) | REX_X(s);
2046 if (index != 4) {
2047 scale = (code >> 6);
2048 addr += get_reg(s, index) << scale;
2050 goto next;
2052 modrm32_04:
2053 /* sib */
2054 code = ldub_code(s);
2055 base = (code & 7);
2056 if (base == 5) {
2057 addr = (int32_t)ldl_code(s);
2058 base = 0; /* force DS override */
2059 } else {
2060 base |= REX_B(s);
2061 addr = get_reg(s, base);
2063 index = ((code >> 3) & 7) | REX_X(s);
2064 if (index != 4) {
2065 scale = (code >> 6);
2066 addr += get_reg(s, index) << scale;
2068 goto next;
2070 modrm32_84:
2071 /* sib */
2072 code = ldub_code(s);
2073 addr = (int32_t)ldl_code(s);
2074 goto do_sib;
2076 modrm32_05:
2077 addr = (int32_t)ldl_code(s);
2078 base = 0; /* force DS override */
2079 if (CODE64(s))
2080 addr += pc + s->rip_offset;
2081 goto next;
2082 modrm32_00:
2083 modrm32_01:
2084 modrm32_02:
2085 modrm32_03:
2086 modrm32_06:
2087 modrm32_07:
2088 base = (modrm & 7) | REX_B(s);
2089 addr = get_reg(s, base);
2090 goto next;
2092 modrm32_40:
2093 modrm32_41:
2094 modrm32_42:
2095 modrm32_43:
2096 modrm32_45:
2097 modrm32_46:
2098 modrm32_47:
2099 addr = (int8_t)ldub_code(s);
2100 base = (modrm & 7) | REX_B(s);
2101 addr += get_reg(s, base);
2102 goto next;
2103 modrm32_80:
2104 modrm32_81:
2105 modrm32_82:
2106 modrm32_83:
2107 modrm32_85:
2108 modrm32_86:
2109 modrm32_87:
2110 addr = (int32_t)ldl_code(s);
2111 base = (modrm & 7) | REX_B(s);
2112 addr += get_reg(s, base);
2113 next:
2114 if (unlikely(s->popl_esp_hack)) {
2115 if (base == 4)
2116 addr += s->popl_esp_hack;
2118 #else
2119 int havesib;
2121 mod = (modrm >> 6) & 3;
2122 rm = modrm & 7;
2123 havesib = 0;
2124 base = rm;
2125 index = 0;
2126 scale = 0;
2128 if (base == 4) {
2129 havesib = 1;
2130 code = ldub_code(s);
2131 scale = (code >> 6) & 3;
2132 index = ((code >> 3) & 7) | REX_X(s);
2133 base = (code & 7);
2135 base |= REX_B(s);
2137 switch (mod) {
2138 case 0:
2139 if ((base & 7) == 5) {
2140 base = -1;
2141 disp = (int32_t)ldl_code(s);
2142 if (CODE64(s) && !havesib) {
2143 disp += pc + s->rip_offset;
2145 } else {
2146 disp = 0;
2148 break;
2149 case 1:
2150 disp = (int8_t)ldub_code(s);
2151 break;
2152 default:
2153 case 2:
2154 disp = (int32_t)ldl_code(s);
2155 break;
2158 addr = disp;
2159 if (base >= 0) {
2160 /* for correct popl handling with esp */
2161 if (base == 4 && s->popl_esp_hack)
2162 addr += s->popl_esp_hack;
2163 addr += get_reg(s, base);
2165 /* XXX: index == 4 is always invalid */
2166 if (havesib && (index != 4 || scale != 0)) {
2167 addr += get_reg(s, index) << scale;
2169 #endif
2170 override = s->override;
2171 if (CODE64(s)) {
2172 if (override == R_FS || override == R_GS)
2173 addr += s->cpu_state.segs[override].base;
2174 if (s->aflag != 2)
2175 addr = (uint32_t)addr;
2176 } else {
2177 if (override != -2) {
2178 if (override < 0) {
2179 if (base == R_EBP || base == R_ESP)
2180 override = R_SS;
2181 else
2182 override = R_DS;
2184 addr += s->cpu_state.segs[override].base;
2186 addr = (uint32_t)addr;
2188 } else {
2189 mod = (modrm >> 6) & 3;
2190 rm = modrm & 7;
2191 switch (mod) {
2192 case 0:
2193 if (rm == 6) {
2194 disp = lduw_code(s);
2195 addr = disp;
2196 rm = 0; /* avoid SS override */
2197 goto no_rm;
2198 } else {
2199 disp = 0;
2201 break;
2202 case 1:
2203 disp = (int8_t)ldub_code(s);
2204 break;
2205 default:
2206 case 2:
2207 disp = lduw_code(s);
2208 break;
2210 switch(rm) {
2211 case 0:
2212 addr = s->regs1.ebx + s->regs1.esi;
2213 break;
2214 case 1:
2215 addr = s->regs1.ebx + s->regs1.edi;
2216 break;
2217 case 2:
2218 addr = s->regs1.ebp + s->regs1.esi;
2219 break;
2220 case 3:
2221 addr = s->regs1.ebp + s->regs1.edi;
2222 break;
2223 case 4:
2224 addr = s->regs1.esi;
2225 break;
2226 case 5:
2227 addr = s->regs1.edi;
2228 break;
2229 case 6:
2230 addr = s->regs1.ebp;
2231 break;
2232 default:
2233 case 7:
2234 addr = s->regs1.ebx;
2235 break;
2237 addr += disp;
2238 addr &= 0xffff;
2239 no_rm:
2240 override = s->override;
2241 if (override != -2) {
2242 if (override < 0) {
2243 if (rm == 2 || rm == 3 || rm == 6)
2244 override = R_SS;
2245 else
2246 override = R_DS;
2248 addr += s->cpu_state.segs[override].base;
2251 #ifdef DEBUG_INTERP
2252 monitor_log(s, "get_modrm: addr=%08lx\n", addr);
2253 #endif
2254 return addr;
2257 /* operand size */
2258 enum {
2259 OT_BYTE = 0,
2260 OT_WORD,
2261 OT_LONG,
2262 OT_QUAD,
2265 static inline int insn_const_size(unsigned int ot)
2267 if (ot <= OT_LONG)
2268 return 1 << ot;
2269 else
2270 return 4;
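/* Immediates are at most 32 bits even with a 64-bit operand size (they are
   sign-extended), hence the cap at 4; the only 64-bit immediate form,
   mov r64, imm64, is handled separately with ldq_code(). */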
2273 static inline int insn_get(struct kqemu_state *s, int ot)
2275 int ret;
2277 switch(ot) {
2278 case OT_BYTE:
2279 ret = ldub_code(s);
2280 break;
2281 case OT_WORD:
2282 ret = lduw_code(s);
2283 break;
2284 default:
2285 case OT_LONG:
2286 ret = ldl_code(s);
2287 break;
2289 return ret;
2292 #define EB_ADD (0 * 4)
2293 #define EB_AND (4 * 4)
2294 #define EB_SUB (5 * 4)
2295 #define EB_INC (8 * 4)
2296 #define EB_DEC (9 * 4)
2297 #define EB_ROL (10 * 4)
2298 #define EB_BT (18 * 4)
2299 #define EB_BSF (22 * 4)
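/* Indices into the exec_binary() operation table: each operation appears to
   occupy 4 consecutive slots, one per operand size, so calls use
   EB_xxx + ot (or EB_xxx + (sub_op << 2) + ot for grouped opcodes).  The
   ordering follows the x86 sub-opcode encodings: add/or/adc/sbb/and/sub/
   xor/cmp, then inc/dec, the rotate/shift group, bt/bts/btr/btc and
   bsf/bsr. */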
2301 #ifdef __x86_64__
2302 #define UPDATE_CODE32()\
2304 if (CODE64(s)) {\
2305 code32 = 1;\
2306 flags_initval = 0x00ff0201;\
2307 } else {\
2308 code32 = (s->cpu_state.segs[R_CS].flags >> DESC_B_SHIFT) & 1;\
2309 flags_initval = code32 | (code32 << 8) | 0x00ff0000;\
2312 #else
2313 #define UPDATE_CODE32()\
2315 code32 = (s->cpu_state.segs[R_CS].flags >> DESC_B_SHIFT) & 1;\
2316 flags_initval = code32 | (code32 << 8) | 0x00ff0000;\
2318 #endif
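/* flags_initval is stored with a single 32/64-bit write starting at
   s->dflag (see insn_next2 below), so its bytes appear to map onto the
   consecutive byte-sized decode fields of the state: byte 0 = dflag,
   byte 1 = aflag, byte 2 = 0xff (presumably the "no segment override"
   default), and the remaining bytes reset to 0.  In 64-bit mode the
   defaults are dflag=1 (32-bit operands) and aflag=2 (64-bit addresses). */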
2320 #ifdef __x86_64__
2322 #define LOAD_CC()\
2323 "push %%rcx\n"\
2324 "andl $0x8d5, %%ecx\n"\
2325 "pushf\n"\
2326 "pop %%rax\n"\
2327 "andl $~0x8d5, %%eax\n"\
2328 "orl %%ecx, %%eax\n"\
2329 "pop %%rcx\n"\
2330 "push %%rax\n"\
2331 "popf\n"
2333 #define SAVE_CC()\
2334 "pushf\n"\
2335 "pop %%rax\n"\
2336 "andl $0x8d5, %%eax\n"\
2337 "andl $~0x8d5, %%ecx\n"\
2338 "orl %%eax, %%ecx\n"
2340 #define SAVE_CC_LOGIC() SAVE_CC()
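/* 0x8d5 = CF|PF|AF|ZF|SF|OF, the arithmetic status bits.  LOAD_CC merges
   the guest flags kept in %ecx into the host EFLAGS before the emulated
   ALU instruction runs, and SAVE_CC copies the resulting status bits back
   into %ecx, leaving all other EFLAGS bits untouched. */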
2342 /* XXX: suppress */
2343 #define SAHF ".byte 0x9e"
2344 #define LAHF ".byte 0x9f"
2346 #else
2348 #ifdef __x86_64__
2349 #define SAHF ".byte 0x9e"
2350 #define LAHF ".byte 0x9f"
2351 #else
2352 #define SAHF "sahf"
2353 #define LAHF "lahf"
2354 #endif
2356 #define LOAD_CC()\
2357 "movb %%cl, %%ah\n"\
2358 SAHF "\n"
2360 #define SAVE_CC()\
2361 LAHF "\n"\
2362 "seto %%al\n"\
2363 "movb %%ah, %%cl\n"\
2364 "shll $3, %%eax\n"\
2365 "andl $~0x0800, %%ecx\n"\
2366 "orb %%al, %%ch\n"
2368 #define SAVE_CC_LOGIC()\
2369 LAHF "\n"\
2370 "movb %%ah, %%cl\n"\
2371 "andl $~0x0800, %%ecx\n"
2373 #endif /* !__x86_64__ */
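/* On 32-bit hosts LAHF/SAHF are used instead of pushf/popf.  LAHF only
   transfers SF/ZF/AF/PF/CF (which already sit in the low byte of EFLAGS),
   so SAVE_CC rebuilds OF with "seto %al; shll $3, %eax; orb %al, %ch",
   placing it at bit 11 of %ecx.  SAVE_CC_LOGIC can skip that step because
   the logical ops always clear OF. */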
2375 /* return -1 if unsupported insn */
2376 int insn_interp(struct kqemu_state *s)
2378 int b, sel, ot;
2379 int modrm, mod, op, code32, reg, rm, iopl;
2380 long val, val2;
2381 unsigned long next_eip, addr, saved_pc, eflags;
2382 uint32_t flags_initval;
2383 #ifdef PROFILE_INSN
2384 int opcode;
2385 int64_t ti;
2386 #endif
2388 #ifdef __x86_64__
2389 #define NB_INSN_TABLES 3
2390 #else
2391 #define NB_INSN_TABLES 2
2392 #endif
2393 static const void *insn_table[NB_INSN_TABLES][512] = {
2395 #define INSN(x) &&insn_ ## x,
2396 #define INSN_S(x) &&insn_ ## x ## w,
2397 #include "insn_table.h"
2398 #undef INSN_S
2399 #undef INSN
2402 #define INSN(x) &&insn_ ## x,
2403 #define INSN_S(x) &&insn_ ## x ## l,
2404 #include "insn_table.h"
2405 #undef INSN_S
2406 #undef INSN
2408 #ifdef __x86_64__
2410 #define INSN(x) &&insn_ ## x,
2411 #define INSN_S(x) &&insn_ ## x ## q,
2412 #include "insn_table.h"
2413 #undef INSN_S
2414 #undef INSN
2416 #endif
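/* One 512-entry dispatch table per operand-size state (s->dflag: 0=16-bit,
   1=32-bit, plus 2=64-bit on x86_64).  Entries 0-255 are the one-byte
   opcodes, entries 256-511 the 0x0f two-byte map (see "b = ldub_code(s) |
   0x100" below).  INSN() handlers are size-independent and shared between
   the tables; INSN_S() handlers get a w/l/q-suffixed label per table.  The
   LABEL() macro below also emits a global asm symbol for each handler,
   presumably so they remain visible in profiles and cannot be coalesced by
   the compiler. */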
2419 #define LABEL(x) insn_ ## x: asm volatile(".globl insn_" #x " ; insn_" #x ":\n") ;
2421 saved_pc = pc; /* save register variable */
2422 #ifdef PROFILE_INTERP2
2423 s->total_interp_count++;
2424 #endif
2425 s->popl_esp_hack = 0;
2426 s->rip_offset = 0; /* for relative ip address */
2427 UPDATE_CODE32();
2428 pc = s->regs1.eip;
2429 goto insn_next2;
2430 insn_next:
2431 s->regs1.eip = pc;
2432 if (unlikely(get_eflags_if(s)))
2433 goto the_end;
2434 /* XXX: since we run with the IRQs disabled, it is better to
2435 stop executing after a few instructions */
2436 insn_next3:
2437 if (unlikely(--s->insn_count <= 0))
2438 raise_exception(s, KQEMU_RET_SOFTMMU);
2439 insn_next2:
2440 #if defined(DEBUG_INTERP)
2441 monitor_log(s, "%05d: %04x:" FMT_lx " %04x:" FMT_lx " eax=" FMT_lx "\n",
2442 s->insn_count,
2443 get_seg_sel(s, R_CS),
2444 (long)s->regs1.eip,
2445 get_seg_sel(s, R_SS),
2446 (long)s->regs1.esp,
2447 (long)s->regs1.eax);
2448 #endif
2449 #ifdef __x86_64__
2450 *(uint64_t *)&s->dflag = flags_initval;
2451 #else
2452 *(uint32_t *)&s->dflag = flags_initval;
2453 #endif
2454 #ifdef PROFILE_INSN
2455 ti = getclock();
2456 #endif
2457 next_byte:
2458 /* XXX: more precise test */
2459 if (unlikely((pc - (unsigned long)&_start) < MONITOR_MEM_SIZE))
2460 raise_exception(s, KQEMU_RET_SOFTMMU);
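/* The monitor itself occupies the [_start, _start + MONITOR_MEM_SIZE)
   window; if the guest PC falls inside it, presumably code must not be
   fetched from the monitor's own mapping, so execution is handed back via
   the soft MMU path (hence the "more precise test" remark above). */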
2461 b = ldub_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
2462 pc++;
2463 reswitch:
2464 #ifdef PROFILE_INSN
2465 opcode = b;
2466 #endif
2467 goto *insn_table[s->dflag][b];
2469 /* prefix processing */
2470 LABEL(f3)
2471 s->prefix |= PREFIX_REPZ;
2472 goto next_byte;
2473 LABEL(f2)
2474 s->prefix |= PREFIX_REPNZ;
2475 goto next_byte;
2476 LABEL(f0)
2477 s->prefix |= PREFIX_LOCK;
2478 goto next_byte;
2479 LABEL(2e)
2480 s->override = R_CS;
2481 goto next_byte;
2482 LABEL(36)
2483 s->override = R_SS;
2484 goto next_byte;
2485 LABEL(3e)
2486 s->override = R_DS;
2487 goto next_byte;
2488 LABEL(26)
2489 s->override = R_ES;
2490 goto next_byte;
2491 LABEL(64)
2492 s->override = R_FS;
2493 goto next_byte;
2494 LABEL(65)
2495 s->override = R_GS;
2496 goto next_byte;
2497 LABEL(66)
2498 s->dflag = !code32;
2499 goto next_byte;
2500 LABEL(67)
2501 if (CODE64(s))
2502 s->aflag = 1;
2503 else
2504 s->aflag = !code32;
2505 goto next_byte;
2507 #ifdef __x86_64__
2508 rex_prefix:
2510 int rex_w;
2511 /* REX prefix */
2512 rex_w = (b >> 3) & 1;
2513 s->rex_r = (b & 0x4) << 1;
2514 s->rex_x = (b & 0x2) << 2;
2515 s->rex_b = (b & 0x1) << 3;
2516 s->prefix |= PREFIX_REX;
2518 /* we assume, as in the AMD spec, that the REX prefix comes after
2519 the legacy prefixes */
2520 if (rex_w == 1) {
2521 /* 0x66 is ignored if rex.w is set */
2522 s->dflag = 2;
2525 goto next_byte;
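/* rex_r/rex_x/rex_b are stored as 0 or 8 so they can be OR-ed directly
   onto the 3-bit reg/index/base fields, e.g. ((modrm >> 3) & 7) | REX_R(s).
   REX.W forces the 64-bit operand size (dflag = 2) and makes a 0x66 prefix
   a no-op. */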
2526 #endif
2527 LABEL(0f)
2528 /**************************/
2529 /* extended op code */
2530 b = ldub_code(s) | 0x100;
2531 goto reswitch;
2533 /**************************/
2534 /* arith & logic */
2536 #define ARITH_OP(op, eflags, val, val2)\
2537 asm volatile(op "\n"\
2538 SAVE_CC()\
2539 : "=c" (eflags),\
2540 "=q" (val)\
2541 : "0" (eflags),\
2542 "1" (val),\
2543 "q" (val2)\
2544 : "%eax");\
2546 #define ARITH_OPC(op, eflags, val, val2)\
2547 asm volatile(LOAD_CC() \
2548 op "\n"\
2549 SAVE_CC()\
2550 : "=c" (eflags),\
2551 "=q" (val)\
2552 : "0" (eflags),\
2553 "1" (val),\
2554 "q" (val2)\
2555 : "%eax");
2557 #define LOGIC_OP(op, eflags, val, val2)\
2558 asm volatile(op "\n"\
2559 SAVE_CC_LOGIC()\
2560 : "=c" (eflags),\
2561 "=q" (val)\
2562 : "0" (eflags),\
2563 "1" (val),\
2564 "q" (val2)\
2565 : "%eax");
2567 #define ARITH_EXEC(eflags, op, ot, val, val2)\
2568 switch(ot) {\
2569 case OT_BYTE:\
2570 switch(op) {\
2571 case 0: ARITH_OP("addb %b4, %b1", eflags, val, val2); break;\
2572 case 1: LOGIC_OP("orb %b4, %b1", eflags, val, val2); break;\
2573 case 2: ARITH_OPC("adcb %b4, %b1", eflags, val, val2); break;\
2574 case 3: ARITH_OPC("sbbb %b4, %b1", eflags, val, val2); break;\
2575 case 4: LOGIC_OP("andb %b4, %b1", eflags, val, val2); break;\
2576 case 5: ARITH_OP("subb %b4, %b1", eflags, val, val2); break;\
2577 case 6: LOGIC_OP("xorb %b4, %b1", eflags, val, val2); break;\
2578 default: ARITH_OP("cmpb %b4, %b1", eflags, val, val2); break;\
2580 break;\
2581 case OT_WORD:\
2582 switch(op) {\
2583 case 0: ARITH_OP("addw %w4, %w1", eflags, val, val2); break;\
2584 case 1: LOGIC_OP("orw %w4, %w1", eflags, val, val2); break;\
2585 case 2: ARITH_OPC("adcw %w4, %w1", eflags, val, val2); break;\
2586 case 3: ARITH_OPC("sbbw %w4, %w1", eflags, val, val2); break;\
2587 case 4: LOGIC_OP("andw %w4, %w1", eflags, val, val2); break;\
2588 case 5: ARITH_OP("subw %w4, %w1", eflags, val, val2); break;\
2589 case 6: LOGIC_OP("xorw %w4, %w1", eflags, val, val2); break;\
2590 default: ARITH_OP("cmpw %w4, %w1", eflags, val, val2); break;\
2592 break;\
2593 case OT_LONG:\
2594 switch(op) {\
2595 case 0: ARITH_OP("addl %k4, %k1", eflags, val, val2); break;\
2596 case 1: LOGIC_OP("orl %k4, %k1", eflags, val, val2); break;\
2597 case 2: ARITH_OPC("adcl %k4, %k1", eflags, val, val2); break;\
2598 case 3: ARITH_OPC("sbbl %k4, %k1", eflags, val, val2); break;\
2599 case 4: LOGIC_OP("andl %k4, %k1", eflags, val, val2); break;\
2600 case 5: ARITH_OP("subl %k4, %k1", eflags, val, val2); break;\
2601 case 6: LOGIC_OP("xorl %k4, %k1", eflags, val, val2); break;\
2602 default: ARITH_OP("cmpl %k4, %k1", eflags, val, val2); break;\
2604 break;\
2605 QO(case OT_QUAD:\
2606 switch(op) {\
2607 case 0: ARITH_OP("addq %4, %1", eflags, val, val2); break;\
2608 case 1: LOGIC_OP("orq %4, %1", eflags, val, val2); break;\
2609 case 2: ARITH_OPC("adcq %4, %1", eflags, val, val2); break;\
2610 case 3: ARITH_OPC("sbbq %4, %1", eflags, val, val2); break;\
2611 case 4: LOGIC_OP("andq %4, %1", eflags, val, val2); break;\
2612 case 5: ARITH_OP("subq %4, %1", eflags, val, val2); break;\
2613 case 6: LOGIC_OP("xorq %4, %1", eflags, val, val2); break;\
2614 default: ARITH_OP("cmpq %4, %1", eflags, val, val2); break;\
2616 break;)\
2619 #define ARITH_Ev_Gv(op, ot) \
2620 { int modrm, reg, mod; unsigned long val, val2, eflags;\
2621 modrm = ldub_code(s);\
2622 reg = ((modrm >> 3) & 7) | REX_R(s);\
2623 mod = (modrm >> 6);\
2624 val2 = get_regS(s, ot, reg);\
2625 if (mod != 3) {\
2626 addr = get_modrm(s, modrm);\
2627 val = ldS(s, ot, addr);\
2628 eflags = s->regs1.eflags;\
2629 ARITH_EXEC(eflags, op, ot, val, val2);\
2630 if (op != 7)\
2631 stS(s, ot, addr, val);\
2632 s->regs1.eflags = eflags;\
2633 } else {\
2634 rm = (modrm & 7) | REX_B(s);\
2635 val = get_regS(s, ot, rm);\
2636 ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
2637 if (op != 7)\
2638 set_regS(s, ot, rm, val);\
2641 goto insn_next;
2643 #define ARITH_Gv_Ev(op, ot)\
2644 modrm = ldub_code(s);\
2645 mod = (modrm >> 6);\
2646 reg = ((modrm >> 3) & 7) | REX_R(s);\
2647 if (mod != 3) {\
2648 addr = get_modrm(s, modrm);\
2649 val2 = ldS(s, ot, addr);\
2650 } else {\
2651 rm = (modrm & 7) | REX_B(s);\
2652 val2 = get_regS(s, ot, rm);\
2654 val = get_regS(s, ot, reg);\
2655 ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
2656 if (op != 7)\
2657 set_regS(s, ot, reg, val);\
2658 goto insn_next;
2660 #define ARITH_A_Iv(op, ot)\
2661 if (ot == 0)\
2662 val2 = (int8_t)ldub_code(s);\
2663 else if (ot == 1)\
2664 val2 = (int16_t)lduw_code(s);\
2665 else\
2666 val2 = (int32_t)ldl_code(s);\
2667 val = s->regs1.eax;\
2668 ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
2669 if (op != 7)\
2670 set_regS(s, ot, R_EAX, val);\
2671 goto insn_next;
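/* Each ALU opcode row follows the standard x86 layout: op Ev,Gv / op Gv,Ev /
   op {AL,eAX},I, with the operation index 0-7 meaning add, or, adc, sbb,
   and, sub, xor, cmp (op 7, cmp, skips the write-back).  For example
   LABEL(01w) below is opcode 0x01 (add Ev,Gv) executed with 16-bit
   operands. */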
2674 LABEL(00) ARITH_Ev_Gv(0, OT_BYTE);
2675 LABEL(01w) ARITH_Ev_Gv(0, OT_WORD);
2676 LABEL(01l) ARITH_Ev_Gv(0, OT_LONG);
2677 QO( LABEL(01q) ARITH_Ev_Gv(0, OT_QUAD); )
2678 LABEL(02) ARITH_Gv_Ev(0, OT_BYTE);
2679 LABEL(03w) ARITH_Gv_Ev(0, OT_WORD);
2680 LABEL(03l) ARITH_Gv_Ev(0, OT_LONG);
2681 QO( LABEL(03q) ARITH_Gv_Ev(0, OT_QUAD); )
2682 LABEL(04) ARITH_A_Iv(0, OT_BYTE);
2683 LABEL(05w) ARITH_A_Iv(0, OT_WORD);
2684 LABEL(05l) ARITH_A_Iv(0, OT_LONG);
2685 QO( LABEL(05q) ARITH_A_Iv(0, OT_QUAD); )
2687 LABEL(08) ARITH_Ev_Gv(1, OT_BYTE);
2688 LABEL(09w) ARITH_Ev_Gv(1, OT_WORD);
2689 LABEL(09l) ARITH_Ev_Gv(1, OT_LONG);
2690 QO( LABEL(09q) ARITH_Ev_Gv(1, OT_QUAD); )
2691 LABEL(0a) ARITH_Gv_Ev(1, OT_BYTE);
2692 LABEL(0bw) ARITH_Gv_Ev(1, OT_WORD);
2693 LABEL(0bl) ARITH_Gv_Ev(1, OT_LONG);
2694 QO( LABEL(0bq) ARITH_Gv_Ev(1, OT_QUAD); )
2695 LABEL(0c) ARITH_A_Iv(1, OT_BYTE);
2696 LABEL(0dw) ARITH_A_Iv(1, OT_WORD);
2697 LABEL(0dl) ARITH_A_Iv(1, OT_LONG);
2698 QO( LABEL(0dq) ARITH_A_Iv(1, OT_QUAD); )
2700 LABEL(10) ARITH_Ev_Gv(2, OT_BYTE);
2701 LABEL(11w) ARITH_Ev_Gv(2, OT_WORD);
2702 LABEL(11l) ARITH_Ev_Gv(2, OT_LONG);
2703 QO( LABEL(11q) ARITH_Ev_Gv(2, OT_QUAD); )
2704 LABEL(12) ARITH_Gv_Ev(2, OT_BYTE);
2705 LABEL(13w) ARITH_Gv_Ev(2, OT_WORD);
2706 LABEL(13l) ARITH_Gv_Ev(2, OT_LONG);
2707 QO( LABEL(13q) ARITH_Gv_Ev(2, OT_QUAD); )
2708 LABEL(14) ARITH_A_Iv(2, OT_BYTE);
2709 LABEL(15w) ARITH_A_Iv(2, OT_WORD);
2710 LABEL(15l) ARITH_A_Iv(2, OT_LONG);
2711 QO( LABEL(15q) ARITH_A_Iv(2, OT_QUAD); )
2713 LABEL(18) ARITH_Ev_Gv(3, OT_BYTE);
2714 LABEL(19w) ARITH_Ev_Gv(3, OT_WORD);
2715 LABEL(19l) ARITH_Ev_Gv(3, OT_LONG);
2716 QO( LABEL(19q) ARITH_Ev_Gv(3, OT_QUAD); )
2717 LABEL(1a) ARITH_Gv_Ev(3, OT_BYTE);
2718 LABEL(1bw) ARITH_Gv_Ev(3, OT_WORD);
2719 LABEL(1bl) ARITH_Gv_Ev(3, OT_LONG);
2720 QO( LABEL(1bq) ARITH_Gv_Ev(3, OT_QUAD); )
2721 LABEL(1c) ARITH_A_Iv(3, OT_BYTE);
2722 LABEL(1dw) ARITH_A_Iv(3, OT_WORD);
2723 LABEL(1dl) ARITH_A_Iv(3, OT_LONG);
2724 QO( LABEL(1dq) ARITH_A_Iv(3, OT_QUAD); )
2726 LABEL(20) ARITH_Ev_Gv(4, OT_BYTE);
2727 LABEL(21w) ARITH_Ev_Gv(4, OT_WORD);
2728 LABEL(21l) ARITH_Ev_Gv(4, OT_LONG);
2729 QO( LABEL(21q) ARITH_Ev_Gv(4, OT_QUAD); )
2730 LABEL(22) ARITH_Gv_Ev(4, OT_BYTE);
2731 LABEL(23w) ARITH_Gv_Ev(4, OT_WORD);
2732 LABEL(23l) ARITH_Gv_Ev(4, OT_LONG);
2733 QO( LABEL(23q) ARITH_Gv_Ev(4, OT_QUAD); )
2734 LABEL(24) ARITH_A_Iv(4, OT_BYTE);
2735 LABEL(25w) ARITH_A_Iv(4, OT_WORD);
2736 LABEL(25l) ARITH_A_Iv(4, OT_LONG);
2737 QO( LABEL(25q) ARITH_A_Iv(4, OT_QUAD); )
2739 LABEL(28) ARITH_Ev_Gv(5, OT_BYTE);
2740 LABEL(29w) ARITH_Ev_Gv(5, OT_WORD);
2741 LABEL(29l) ARITH_Ev_Gv(5, OT_LONG);
2742 QO( LABEL(29q) ARITH_Ev_Gv(5, OT_QUAD); )
2743 LABEL(2a) ARITH_Gv_Ev(5, OT_BYTE);
2744 LABEL(2bw) ARITH_Gv_Ev(5, OT_WORD);
2745 LABEL(2bl) ARITH_Gv_Ev(5, OT_LONG);
2746 QO( LABEL(2bq) ARITH_Gv_Ev(5, OT_QUAD); )
2747 LABEL(2c) ARITH_A_Iv(5, OT_BYTE);
2748 LABEL(2dw) ARITH_A_Iv(5, OT_WORD);
2749 LABEL(2dl) ARITH_A_Iv(5, OT_LONG);
2750 QO( LABEL(2dq) ARITH_A_Iv(5, OT_QUAD); )
2752 LABEL(30) ARITH_Ev_Gv(6, OT_BYTE);
2753 LABEL(31w) ARITH_Ev_Gv(6, OT_WORD);
2754 LABEL(31l) ARITH_Ev_Gv(6, OT_LONG);
2755 QO( LABEL(31q) ARITH_Ev_Gv(6, OT_QUAD); )
2756 LABEL(32) ARITH_Gv_Ev(6, OT_BYTE);
2757 LABEL(33w) ARITH_Gv_Ev(6, OT_WORD);
2758 LABEL(33l) ARITH_Gv_Ev(6, OT_LONG);
2759 QO( LABEL(33q) ARITH_Gv_Ev(6, OT_QUAD); )
2760 LABEL(34) ARITH_A_Iv(6, OT_BYTE);
2761 LABEL(35w) ARITH_A_Iv(6, OT_WORD);
2762 LABEL(35l) ARITH_A_Iv(6, OT_LONG);
2763 QO( LABEL(35q) ARITH_A_Iv(6, OT_QUAD); )
2765 LABEL(38) ARITH_Ev_Gv(7, OT_BYTE);
2766 LABEL(39w) ARITH_Ev_Gv(7, OT_WORD);
2767 LABEL(39l) ARITH_Ev_Gv(7, OT_LONG);
2768 QO( LABEL(39q) ARITH_Ev_Gv(7, OT_QUAD); )
2769 LABEL(3a) ARITH_Gv_Ev(7, OT_BYTE);
2770 LABEL(3bw) ARITH_Gv_Ev(7, OT_WORD);
2771 LABEL(3bl) ARITH_Gv_Ev(7, OT_LONG);
2772 QO( LABEL(3bq) ARITH_Gv_Ev(7, OT_QUAD); )
2773 LABEL(3c) ARITH_A_Iv(7, OT_BYTE);
2774 LABEL(3dw) ARITH_A_Iv(7, OT_WORD);
2775 LABEL(3dl) ARITH_A_Iv(7, OT_LONG);
2776 QO( LABEL(3dq) ARITH_A_Iv(7, OT_QUAD); )
2778 #define ARITH_GRP1(b, ot) \
2779 modrm = ldub_code(s);\
2780 mod = (modrm >> 6);\
2781 op = (modrm >> 3) & 7;\
2782 if (mod != 3) {\
2783 if (b == 0x83)\
2784 s->rip_offset = 1;\
2785 else\
2786 s->rip_offset = insn_const_size(ot);\
2787 addr = get_modrm(s, modrm);\
2788 s->rip_offset = 0;\
2789 val = ldS(s, ot, addr);\
2790 switch(b) {\
2791 default:\
2792 case 0x80:\
2793 case 0x81:\
2794 case 0x82:\
2795 val2 = insn_get(s, ot);\
2796 break;\
2797 case 0x83:\
2798 val2 = (int8_t)ldub_code(s);\
2799 break;\
2801 eflags = s->regs1.eflags;\
2802 ARITH_EXEC(eflags, op, ot, val, val2);\
2803 if (op != 7)\
2804 stS(s, ot, addr, val);\
2805 s->regs1.eflags = eflags;\
2806 } else {\
2807 rm = (modrm & 7) | REX_B(s);\
2808 val = get_regS(s, ot, rm);\
2809 switch(b) {\
2810 default:\
2811 case 0x80:\
2812 case 0x81:\
2813 case 0x82:\
2814 val2 = insn_get(s, ot);\
2815 break;\
2816 case 0x83:\
2817 val2 = (int8_t)ldub_code(s);\
2818 break;\
2820 ARITH_EXEC(s->regs1.eflags, op, ot, val, val2);\
2821 if (op != 7)\
2822 set_regS(s, ot, rm, val);\
2824 goto insn_next;
2826 LABEL(80) /* GRP1 */
2827 LABEL(82)
2828 ARITH_GRP1(0x80, OT_BYTE);
2829 LABEL(81w) ARITH_GRP1(0x81, OT_WORD);
2830 LABEL(81l) ARITH_GRP1(0x81, OT_LONG);
2831 QO( LABEL(81q) ARITH_GRP1(0x81, OT_QUAD); )
2832 LABEL(83w) ARITH_GRP1(0x83, OT_WORD);
2833 LABEL(83l) ARITH_GRP1(0x83, OT_LONG);
2834 QO( LABEL(83q) ARITH_GRP1(0x83, OT_QUAD); )
2836 LABEL(84) /* test Ev, Gv */
2837 LABEL(85)
2838 if ((b & 1) == 0)
2839 ot = OT_BYTE;
2840 else
2841 ot = s->dflag + OT_WORD;
2843 modrm = ldub_code(s);
2844 mod = (modrm >> 6);
2845 if (mod != 3) {
2846 addr = get_modrm(s, modrm);
2847 val = ldS(s, ot, addr);
2848 } else {
2849 rm = (modrm & 7) | REX_B(s);
2850 val = get_regS(s, ot, rm);
2852 reg = ((modrm >> 3) & 7) | REX_R(s);
2853 val2 = get_regS(s, ot, reg);
2854 exec_binary(&s->regs1.eflags,
2855 EB_AND + ot,
2856 val, val2);
2857 goto insn_next;
2859 LABEL(a8) /* test eAX, Iv */
2860 LABEL(a9)
2861 if ((b & 1) == 0)
2862 ot = OT_BYTE;
2863 else
2864 ot = s->dflag + OT_WORD;
2865 val2 = insn_get(s, ot);
2867 val = get_regS(s, ot, R_EAX);
2868 exec_binary(&s->regs1.eflags,
2869 EB_AND + ot,
2870 val, val2);
2871 goto insn_next;
2873 LABEL(40) /* inc Gv */
2874 LABEL(41)
2875 LABEL(42)
2876 LABEL(43)
2877 LABEL(44)
2878 LABEL(45)
2879 LABEL(46)
2880 LABEL(47)
2882 LABEL(48) /* dec Gv */
2883 LABEL(49)
2884 LABEL(4a)
2885 LABEL(4b)
2886 LABEL(4c)
2887 LABEL(4d)
2888 LABEL(4e)
2889 LABEL(4f)
2890 #ifdef __x86_64__
2891 if (CODE64(s))
2892 goto rex_prefix;
2893 #endif
2894 ot = s->dflag + OT_WORD;
2895 reg = b & 7;
2896 val = get_regS(s, ot, reg);
2897 val = exec_binary(&s->regs1.eflags,
2898 EB_INC + ((b >> 1) & 4) + ot,
2899 val, 0);
2900 set_regS(s, ot, reg, val);
2901 goto insn_next;
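/* EB_INC + ((b >> 1) & 4) selects EB_INC for 0x40-0x47 and EB_DEC for
   0x48-0x4f (the two constants are 4 apart in the exec_binary table).
   In 64-bit mode these opcodes are REX prefixes instead, hence the jump to
   rex_prefix above. */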
2903 LABEL(f6) /* GRP3 */
2904 LABEL(f7)
2905 if ((b & 1) == 0)
2906 ot = OT_BYTE;
2907 else
2908 ot = s->dflag + OT_WORD;
2910 modrm = ldub_code(s);
2911 mod = (modrm >> 6);
2912 rm = (modrm & 7) | REX_B(s);
2913 op = (modrm >> 3) & 7;
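/* The mul/div variants below execute the operation natively: the guest's
   eax/edx are loaded into the host registers around the instruction, and
   the "1:" label wrapped by SEG_EXCEPTION(1b) presumably lets a host
   divide fault at that address be reflected back to the guest instead of
   taking down the monitor. */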
2915 switch(op) {
2916 case 0: /* test */
2917 if (mod != 3) {
2918 s->rip_offset = insn_const_size(ot);
2919 addr = get_modrm(s, modrm);
2920 s->rip_offset = 0;
2921 val = ldS(s, ot, addr);
2922 } else {
2923 val = get_regS(s, ot, rm);
2925 val2 = insn_get(s, ot);
2926 exec_binary(&s->regs1.eflags,
2927 EB_AND + ot,
2928 val, val2);
2929 break;
2930 case 2: /* not */
2931 if (mod != 3) {
2932 addr = get_modrm(s, modrm);
2933 val = ldS(s, ot, addr);
2934 val = ~val;
2935 stS(s, ot, addr, val);
2936 } else {
2937 val = get_regS(s, ot, rm);
2938 val = ~val;
2939 set_regS(s, ot, rm, val);
2941 break;
2942 case 3: /* neg */
2943 if (mod != 3) {
2944 addr = get_modrm(s, modrm);
2945 val = ldS(s, ot, addr);
2946 eflags = s->regs1.eflags;
2947 val = exec_binary(&eflags, EB_SUB + ot,
2948 0, val);
2949 stS(s, ot, addr, val);
2950 s->regs1.eflags = eflags;
2951 } else {
2952 val = get_regS(s, ot, rm);
2953 val = exec_binary(&s->regs1.eflags, EB_SUB + ot,
2954 0, val);
2955 set_regS(s, ot, rm, val);
2957 break;
2958 case 4: /* mul */
2959 if (mod != 3) {
2960 addr = get_modrm(s, modrm);
2961 val = ldS(s, ot, addr);
2962 } else {
2963 val = get_regS(s, ot, rm);
2965 switch(ot) {
2966 case OT_BYTE:
2967 asm volatile(LOAD_CC()
2968 "movb %1, %%al\n"
2969 "mulb %4\n"
2970 "movw %%ax, %1\n"
2971 SAVE_CC()
2972 : "=c" (s->regs1.eflags),
2973 "=m" (s->regs1.eax)
2974 : "0" (s->regs1.eflags),
2975 "m" (s->regs1.eax),
2976 "m" (val)
2977 : "%eax");
2978 break;
2979 case OT_WORD:
2980 asm volatile(LOAD_CC()
2981 "movw %1, %%ax\n"
2982 "mulw %5\n"
2983 "movw %%ax, %1\n"
2984 "movw %%dx, %2\n"
2985 SAVE_CC()
2986 : "=c" (s->regs1.eflags),
2987 "=m" (s->regs1.eax),
2988 "=m" (s->regs1.edx)
2989 : "0" (s->regs1.eflags),
2990 "m" (s->regs1.eax),
2991 "m" (val)
2992 : "%eax", "%edx");
2993 break;
2994 case OT_LONG:
2995 asm volatile(LOAD_CC()
2996 "movl %1, %%eax\n"
2997 "mull %5\n"
2998 "movl %%eax, %1\n"
2999 "movl %%edx, %2\n"
3000 SAVE_CC()
3001 : "=c" (s->regs1.eflags),
3002 "=m" (s->regs1.eax),
3003 "=m" (s->regs1.edx)
3004 : "0" (s->regs1.eflags),
3005 "m" (s->regs1.eax),
3006 "m" (val)
3007 : "%eax", "%edx");
3008 break;
3009 #ifdef __x86_64__
3010 case OT_QUAD:
3011 asm volatile(LOAD_CC()
3012 "movq %1, %%rax\n"
3013 "mulq %5\n"
3014 "movq %%rax, %1\n"
3015 "movq %%rdx, %2\n"
3016 SAVE_CC()
3017 : "=c" (s->regs1.eflags),
3018 "=m" (s->regs1.eax),
3019 "=m" (s->regs1.edx)
3020 : "0" (s->regs1.eflags),
3021 "m" (s->regs1.eax),
3022 "m" (val)
3023 : "%rax", "%rdx");
3024 break;
3025 #endif
3027 break;
3028 case 5: /* imul */
3029 if (mod != 3) {
3030 addr = get_modrm(s, modrm);
3031 val = ldS(s, ot, addr);
3032 } else {
3033 val = get_regS(s, ot, rm);
3035 switch(ot) {
3036 case OT_BYTE:
3037 asm volatile(LOAD_CC()
3038 "movb %1, %%al\n"
3039 "imulb %4\n"
3040 "movw %%ax, %1\n"
3041 SAVE_CC()
3042 : "=c" (s->regs1.eflags),
3043 "=m" (s->regs1.eax)
3044 : "0" (s->regs1.eflags),
3045 "m" (s->regs1.eax),
3046 "m" (val)
3047 : "%eax");
3048 break;
3049 case OT_WORD:
3050 asm volatile(LOAD_CC()
3051 "movw %1, %%ax\n"
3052 "imulw %5\n"
3053 "movw %%ax, %1\n"
3054 "movw %%dx, %2\n"
3055 SAVE_CC()
3056 : "=c" (s->regs1.eflags),
3057 "=m" (s->regs1.eax),
3058 "=m" (s->regs1.edx)
3059 : "0" (s->regs1.eflags),
3060 "m" (s->regs1.eax),
3061 "m" (val)
3062 : "%eax", "%edx");
3063 break;
3064 case OT_LONG:
3065 asm volatile(LOAD_CC()
3066 "movl %1, %%eax\n"
3067 "imull %5\n"
3068 "movl %%eax, %1\n"
3069 "movl %%edx, %2\n"
3070 SAVE_CC()
3071 : "=c" (s->regs1.eflags),
3072 "=m" (s->regs1.eax),
3073 "=m" (s->regs1.edx)
3074 : "0" (s->regs1.eflags),
3075 "m" (s->regs1.eax),
3076 "m" (val)
3077 : "%eax", "%edx");
3078 break;
3079 #ifdef __x86_64__
3080 case OT_QUAD:
3081 asm volatile(LOAD_CC()
3082 "movq %1, %%rax\n"
3083 "imulq %5\n"
3084 "movq %%rax, %1\n"
3085 "movq %%rdx, %2\n"
3086 SAVE_CC()
3087 : "=c" (s->regs1.eflags),
3088 "=m" (s->regs1.eax),
3089 "=m" (s->regs1.edx)
3090 : "0" (s->regs1.eflags),
3091 "m" (s->regs1.eax),
3092 "m" (val)
3093 : "%rax", "%rdx");
3094 break;
3095 #endif
3097 break;
3098 case 6: /* div */
3099 if (mod != 3) {
3100 addr = get_modrm(s, modrm);
3101 val = ldS(s, ot, addr);
3102 } else {
3103 val = get_regS(s, ot, rm);
3105 switch(ot) {
3106 case OT_BYTE:
3107 asm volatile("movw %0, %%ax\n"
3108 "1: divb %2\n"
3109 SEG_EXCEPTION(1b)
3110 "movw %%ax, %0\n"
3111 : "=m" (s->regs1.eax)
3112 : "m" (s->regs1.eax),
3113 "m" (val)
3114 : "%eax");
3115 break;
3116 case OT_WORD:
3117 asm volatile("movw %0, %%ax\n"
3118 "movw %1, %%dx\n"
3119 "1: divw %4\n"
3120 SEG_EXCEPTION(1b)
3121 "movw %%ax, %0\n"
3122 "movw %%dx, %1\n"
3123 : "=m" (s->regs1.eax),
3124 "=m" (s->regs1.edx)
3125 : "m" (s->regs1.eax),
3126 "m" (s->regs1.edx),
3127 "m" (val)
3128 : "%eax", "%edx");
3129 break;
3130 case OT_LONG:
3131 asm volatile("1: divl %4\n"
3132 SEG_EXCEPTION(1b)
3133 : "=a" (s->regs1.eax),
3134 "=d" (s->regs1.edx)
3135 : "0" (s->regs1.eax),
3136 "1" (s->regs1.edx),
3137 "m" (val));
3138 break;
3139 #ifdef __x86_64__
3140 case OT_QUAD:
3141 asm volatile("movq %0, %%rax\n"
3142 "movq %1, %%rdx\n"
3143 "1: divq %4\n"
3144 SEG_EXCEPTION(1b)
3145 "movq %%rax, %0\n"
3146 "movq %%rdx, %1\n"
3147 : "=m" (s->regs1.eax),
3148 "=m" (s->regs1.edx)
3149 : "m" (s->regs1.eax),
3150 "m" (s->regs1.edx),
3151 "m" (val)
3152 : "%eax", "%edx");
3153 break;
3154 #endif
3156 break;
3157 case 7: /* idiv */
3158 if (mod != 3) {
3159 addr = get_modrm(s, modrm);
3160 val = ldS(s, ot, addr);
3161 } else {
3162 val = get_regS(s, ot, rm);
3164 switch(ot) {
3165 case OT_BYTE:
3166 asm volatile("movw %0, %%ax\n"
3167 "1: idivb %2\n"
3168 SEG_EXCEPTION(1b)
3169 "movw %%ax, %0\n"
3170 : "=m" (s->regs1.eax)
3171 : "m" (s->regs1.eax),
3172 "m" (val)
3173 : "%eax");
3174 break;
3175 case OT_WORD:
3176 asm volatile("movw %0, %%ax\n"
3177 "movw %1, %%dx\n"
3178 "1: idivw %4\n"
3179 SEG_EXCEPTION(1b)
3180 "movw %%ax, %0\n"
3181 "movw %%dx, %1\n"
3182 : "=m" (s->regs1.eax),
3183 "=m" (s->regs1.edx)
3184 : "m" (s->regs1.eax),
3185 "m" (s->regs1.edx),
3186 "m" (val)
3187 : "%eax", "%edx");
3188 break;
3189 case OT_LONG:
3190 asm volatile("1: idivl %4\n"
3191 SEG_EXCEPTION(1b)
3192 : "=a" (s->regs1.eax),
3193 "=d" (s->regs1.edx)
3194 : "0" (s->regs1.eax),
3195 "1" (s->regs1.edx),
3196 "m" (val));
3197 break;
3198 #ifdef __x86_64__
3199 case OT_QUAD:
3200 asm volatile("movq %0, %%rax\n"
3201 "movq %1, %%rdx\n"
3202 "1: idivq %4\n"
3203 SEG_EXCEPTION(1b)
3204 "movq %%rax, %0\n"
3205 "movq %%rdx, %1\n"
3206 : "=m" (s->regs1.eax),
3207 "=m" (s->regs1.edx)
3208 : "m" (s->regs1.eax),
3209 "m" (s->regs1.edx),
3210 "m" (val)
3211 : "%eax", "%edx");
3212 break;
3213 #endif
3215 break;
3216 default:
3217 goto illegal_op;
3219 goto insn_next;
3221 LABEL(69) /* imul Gv, Ev, I */
3222 LABEL(6b)
3223 ot = s->dflag + OT_WORD;
3224 modrm = ldub_code(s);
3225 mod = (modrm >> 6);
3226 if (mod != 3) {
3227 if (b == 0x69)
3228 s->rip_offset = insn_const_size(ot);
3229 else
3230 s->rip_offset = 1;
3231 addr = get_modrm(s, modrm);
3232 s->rip_offset = 0;
3233 val = ldS(s, ot, addr);
3234 } else {
3235 rm = (modrm & 7) | REX_B(s);
3236 val = get_regS(s, ot, rm);
3238 reg = ((modrm >> 3) & 7) | REX_R(s);
3239 if (b == 0x69) {
3240 val2 = insn_get(s, ot);
3241 } else {
3242 val2 = (int8_t)ldub_code(s);
3244 reg = ((modrm >> 3) & 7) | REX_R(s);
3245 goto do_imul;
3246 LABEL(1af) /* imul Gv, Ev */
3247 ot = s->dflag + OT_WORD;
3248 modrm = ldub_code(s);
3249 mod = (modrm >> 6);
3250 if (mod != 3) {
3251 addr = get_modrm(s, modrm);
3252 val = ldS(s, ot, addr);
3253 } else {
3254 rm = (modrm & 7) | REX_B(s);
3255 val = get_regS(s, ot, rm);
3257 reg = ((modrm >> 3) & 7) | REX_R(s);
3258 val2 = get_regS(s, ot, reg);
3259 do_imul:
3260 switch(ot) {
3261 case OT_WORD:
3262 asm volatile(LOAD_CC()
3263 "imulw %w4, %w1\n"
3264 SAVE_CC()
3265 : "=c" (s->regs1.eflags),
3266 "=r" (val)
3267 : "0" (s->regs1.eflags),
3268 "1" (val),
3269 "r" (val2)
3270 : "%eax");
3271 break;
3272 case OT_LONG:
3273 asm volatile(LOAD_CC()
3274 "imull %k4, %k1\n"
3275 SAVE_CC()
3276 : "=c" (s->regs1.eflags),
3277 "=r" (val)
3278 : "0" (s->regs1.eflags),
3279 "1" (val),
3280 "r" (val2)
3281 : "%eax");
3282 break;
3283 #ifdef __x86_64__
3284 case OT_QUAD:
3285 asm volatile(LOAD_CC()
3286 "imulq %4, %1\n"
3287 SAVE_CC()
3288 : "=c" (s->regs1.eflags),
3289 "=r" (val)
3290 : "0" (s->regs1.eflags),
3291 "1" (val),
3292 "r" (val2)
3293 : "%eax");
3294 break;
3295 #endif
3297 set_regS(s, ot, reg, val);
3298 goto insn_next;
3300 LABEL(fe) /* GRP4 */
3301 LABEL(ff) /* GRP5 */
3302 if ((b & 1) == 0)
3303 ot = OT_BYTE;
3304 else
3305 ot = s->dflag + OT_WORD;
3307 modrm = ldub_code(s);
3308 mod = (modrm >> 6);
3309 rm = (modrm & 7) | REX_B(s);
3310 op = (modrm >> 3) & 7;
3312 switch(op) {
3313 case 0: /* inc Ev */
3314 case 1: /* dec Ev */
3315 if (mod != 3) {
3316 addr = get_modrm(s, modrm);
3317 val = ldS(s, ot, addr);
3318 eflags = s->regs1.eflags;
3319 val = exec_binary(&eflags,
3320 EB_INC + (op << 2) + ot,
3321 val, 0);
3322 stS(s, ot, addr, val);
3323 s->regs1.eflags = eflags;
3324 } else {
3325 val = get_regS(s, ot, rm);
3326 val = exec_binary(&s->regs1.eflags,
3327 EB_INC + (op << 2) + ot,
3328 val, 0);
3329 set_regS(s, ot, rm, val);
3331 break;
3332 case 2: /* call Ev */
3333 if (ot == OT_BYTE)
3334 goto illegal_op;
3335 if (CODE64(s))
3336 ot = OT_QUAD;
3337 if (mod != 3) {
3338 addr = get_modrm(s, modrm);
3339 val = ldS(s, ot, addr);
3340 } else {
3341 val = get_regS(s, ot, rm);
3343 if (ot == OT_WORD)
3344 val &= 0xffff;
3345 next_eip = pc;
3346 stack_push(s, next_eip);
3347 pc = val;
3348 goto insn_next;
3349 case 4: /* jmp Ev */
3350 if (ot == OT_BYTE)
3351 goto illegal_op;
3352 if (CODE64(s))
3353 ot = OT_QUAD;
3354 if (mod != 3) {
3355 addr = get_modrm(s, modrm);
3356 val = ldS(s, ot, addr);
3357 } else {
3358 val = get_regS(s, ot, rm);
3360 if (ot == OT_WORD)
3361 val &= 0xffff;
3362 pc = val;
3363 goto insn_next;
3364 case 3: /* lcall Ev */
3365 case 5: /* ljmp Ev */
3366 if (ot == OT_BYTE)
3367 goto illegal_op;
3368 raise_exception(s, KQEMU_RET_SOFTMMU);
3370 case 6: /* push Ev */
3371 if (ot == OT_BYTE)
3372 goto illegal_op;
3373 if (CODE64(s) && s->dflag)
3374 ot = OT_QUAD;
3375 if (mod != 3) {
3376 addr = get_modrm(s, modrm);
3377 val = ldS(s, ot, addr);
3378 } else {
3379 val = get_regS(s, ot, rm);
3381 stack_push(s, val);
3382 break;
3383 default:
3384 goto unhandled_op;
3386 goto insn_next;
3388 LABEL(50w) /* push w */
3389 LABEL(51w)
3390 LABEL(52w)
3391 LABEL(53w)
3392 LABEL(54w)
3393 LABEL(55w)
3394 LABEL(56w)
3395 LABEL(57w)
3396 reg = (b & 7) | REX_B(s);
3397 stack_pushS(s, get_reg(s, reg), 0);
3398 goto insn_next;
3400 LABEL(50l) /* push l */
3401 LABEL(51l)
3402 LABEL(52l)
3403 LABEL(53l)
3404 LABEL(54l)
3405 LABEL(55l)
3406 LABEL(56l)
3407 LABEL(57l)
3408 #ifdef __x86_64__
3409 LABEL(50q) /* push q */
3410 LABEL(51q)
3411 LABEL(52q)
3412 LABEL(53q)
3413 LABEL(54q)
3414 LABEL(55q)
3415 LABEL(56q)
3416 LABEL(57q)
3417 #endif
3418 reg = (b & 7) | REX_B(s);
3419 stack_pushS(s, get_reg(s, reg), 1);
3420 goto insn_next;
3422 LABEL(58) /* pop */
3423 LABEL(59)
3424 LABEL(5a)
3425 LABEL(5b)
3426 LABEL(5c)
3427 LABEL(5d)
3428 LABEL(5e)
3429 LABEL(5f)
3430 reg = (b & 7) | REX_B(s);
3431 if (likely(!CODE64(s) && s->dflag == 1 &&
3432 (s->cpu_state.segs[R_SS].flags & DESC_B_MASK))) {
3433 addr = s->regs1.esp + s->cpu_state.segs[R_SS].base;
3434 val = ldl(s, addr);
3435 /* NOTE: order is important for pop %sp */
3436 s->regs1.esp += 4;
3437 set_regl(s, reg, val);
3438 } else {
3439 val = stack_pop(s);
3440 /* NOTE: order is important for pop %sp */
3441 stack_pop_update(s);
3442 if (CODE64(s)) {
3443 if (s->dflag)
3444 set_reg(s, reg, val);
3445 else
3446 set_regw(s, reg, val);
3447 } else {
3448 if (s->dflag)
3449 set_regl(s, reg, val);
3450 else
3451 set_regw(s, reg, val);
3454 goto insn_next;
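/* Fast path above: a plain 32-bit pop (dflag == 1) from a 32-bit stack
   segment outside long mode is inlined (ldl + esp += 4); everything else
   goes through stack_pop()/stack_pop_update().  The register write is done
   last so that "pop %esp" ends up holding the popped value, as the NOTE
   says. */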
3456 LABEL(68) /* push Iv */
3457 if (s->dflag)
3458 val = (int32_t)ldl_code(s);
3459 else
3460 val = lduw_code(s);
3461 stack_push(s, val);
3462 goto insn_next;
3463 LABEL(6a) /* push Ib */
3464 val = (int8_t)ldub_code(s);
3465 stack_push(s, val);
3466 goto insn_next;
3467 LABEL(8f) /* pop Ev */
3468 if (CODE64(s) && s->dflag)
3469 s->dflag = 2;
3470 ot = s->dflag + OT_WORD;
3471 modrm = ldub_code(s);
3472 mod = (modrm >> 6);
3473 val = stack_pop(s);
3474 if (mod == 3) {
3475 /* NOTE: order is important for pop %sp */
3476 stack_pop_update(s);
3477 rm = (modrm & 7) | REX_B(s);
3478 set_regS(s, ot, rm, val);
3479 } else {
3480 /* NOTE: order is important too for MMU exceptions */
3481 s->popl_esp_hack = 1 << ot;
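/* For a pop with a memory destination the effective address must be
   computed as if ESP had already been incremented by the pop size;
   get_modrm adds popl_esp_hack back in when the base register is ESP
   (base == 4), and the real ESP update only happens after the store, so an
   MMU fault leaves the stack pointer untouched. */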
3482 addr = get_modrm(s, modrm);
3483 s->popl_esp_hack = 0;
3484 stS(s, ot, addr, val);
3485 stack_pop_update(s);
3487 goto insn_next;
3488 LABEL(06) /* push es */
3489 LABEL(0e) /* push cs */
3490 LABEL(16) /* push ss */
3491 LABEL(1e) /* push ds */
3492 if (CODE64(s))
3493 goto illegal_op;
3494 do_push_seg:
3495 reg = (b >> 3) & 7;
3496 val = get_seg_sel(s, reg);
3497 stack_push(s, val);
3498 goto insn_next;
3499 LABEL(1a0) /* push fs */
3500 LABEL(1a8) /* push gs */
3501 goto do_push_seg;
3503 LABEL(07) /* pop es */
3504 LABEL(17) /* pop ss */
3505 LABEL(1f) /* pop ds */
3506 if (CODE64(s))
3507 goto illegal_op;
3508 do_pop_seg:
3509 val = stack_pop(s);
3510 reg = (b >> 3) & 7;
3511 load_seg_desc(s, reg, val & 0xffff);
3512 stack_pop_update(s);
3513 goto insn_next;
3514 LABEL(1a1) /* pop fs */
3515 LABEL(1a9) /* pop gs */
3516 goto do_pop_seg;
3518 LABEL(c9) /* leave */
3519 if (CODE64(s)) {
3520 set_reg(s, R_ESP, s->regs1.ebp);
3521 } else if (s->cpu_state.segs[R_SS].flags & DESC_B_MASK) {
3522 set_regl(s, R_ESP, s->regs1.ebp);
3523 } else {
3524 set_regw(s, R_ESP, s->regs1.ebp);
3526 val = stack_pop(s);
3527 if (CODE64(s) && s->dflag) {
3528 set_reg(s, R_EBP, val);
3529 } else if (s->dflag) {
3530 set_regl(s, R_EBP, val);
3531 } else {
3532 set_regw(s, R_EBP, val);
3534 stack_pop_update(s);
3535 goto insn_next;
3536 /**************************/
3537 /* mov */
3539 #define MOV_Gv_Ev(ot)\
3540 modrm = ldub_code(s);\
3541 reg = ((modrm >> 3) & 7) | REX_R(s);\
3542 val = get_regS(s, ot, reg);\
3543 mod = (modrm >> 6);\
3544 if (mod == 3) {\
3545 rm = (modrm & 7) | REX_B(s);\
3546 set_regS(s, ot, rm, val);\
3547 } else {\
3548 addr = get_modrm(s, modrm);\
3549 stS(s, ot, addr, val);\
3551 goto insn_next;
3553 /* mov Gv, Ev */
3554 LABEL(88) MOV_Gv_Ev(OT_BYTE);
3555 LABEL(89w) MOV_Gv_Ev(OT_WORD);
3556 LABEL(89l) MOV_Gv_Ev(OT_LONG);
3557 QO( LABEL(89q) MOV_Gv_Ev(OT_QUAD); )
3559 #define MOV_Ev_Iv(ot)\
3560 modrm = ldub_code(s);\
3561 mod = (modrm >> 6);\
3562 if (mod != 3) {\
3563 s->rip_offset = insn_const_size(ot);\
3564 addr = get_modrm(s, modrm);\
3565 s->rip_offset = 0;\
3566 val = insn_get(s, ot);\
3567 stS(s, ot, addr, val);\
3568 } else {\
3569 val = insn_get(s, ot);\
3570 rm = (modrm & 7) | REX_B(s);\
3571 set_regS(s, ot, rm, val);\
3573 goto insn_next;
3575 LABEL(c6) MOV_Ev_Iv(OT_BYTE);
3576 LABEL(c7w) MOV_Ev_Iv(OT_WORD);
3577 LABEL(c7l) MOV_Ev_Iv(OT_LONG);
3578 QO( LABEL(c7q) MOV_Ev_Iv(OT_QUAD); )
3580 #define MOV_Ev_Gv(ot)\
3581 modrm = ldub_code(s);\
3582 reg = ((modrm >> 3) & 7) | REX_R(s);\
3583 mod = (modrm >> 6);\
3584 if (mod == 3) {\
3585 rm = (modrm & 7) | REX_B(s);\
3586 val = get_regS(s, ot, rm);\
3587 } else {\
3588 addr = get_modrm(s, modrm);\
3589 val = ldS(s, ot, addr);\
3591 set_regS(s, ot, reg, val);\
3592 goto insn_next;
3594 /* mov Ev, Gv */
3595 LABEL(8a) MOV_Ev_Gv(OT_BYTE);
3596 LABEL(8bw) MOV_Ev_Gv(OT_WORD);
3597 LABEL(8bl) MOV_Ev_Gv(OT_LONG);
3598 QO( LABEL(8bq) MOV_Ev_Gv(OT_QUAD); )
3600 LABEL(8e) /* mov seg, Gv */
3601 modrm = ldub_code(s);
3602 reg = (modrm >> 3) & 7;
3603 if (reg >= 6 || reg == R_CS)
3604 goto illegal_op;
3605 mod = (modrm >> 6);
3606 if (mod == 3) {
3607 val = get_reg(s, modrm & 7) & 0xffff;
3608 } else {
3609 addr = get_modrm(s, modrm);
3610 val = lduw(s, addr);
3612 load_seg_desc(s, reg, val);
3613 goto insn_next;
3614 LABEL(8c) /* mov Gv, seg */
3615 modrm = ldub_code(s);
3616 reg = (modrm >> 3) & 7;
3617 mod = (modrm >> 6);
3618 if (reg >= 6)
3619 goto illegal_op;
3620 val = get_seg_sel(s, reg);
3621 if (mod == 3) {
3622 ot = OT_WORD + s->dflag;
3623 rm = (modrm & 7) | REX_B(s);
3624 set_regS(s, ot, rm, val);
3625 } else {
3626 addr = get_modrm(s, modrm);
3627 stw(s, addr, val);
3629 goto insn_next;
3631 LABEL(b0) /* mov R, Ib */
3632 LABEL(b1)
3633 LABEL(b2)
3634 LABEL(b3)
3635 LABEL(b4)
3636 LABEL(b5)
3637 LABEL(b6)
3638 LABEL(b7)
3639 val = ldub_code(s);
3640 reg = (b & 7) | REX_B(s);
3641 set_regb(s, reg, val);
3642 goto insn_next;
3644 #if defined(__x86_64__)
3645 LABEL(b8q) /* mov R, Iv */
3646 LABEL(b9q)
3647 LABEL(baq)
3648 LABEL(bbq)
3649 LABEL(bcq)
3650 LABEL(bdq)
3651 LABEL(beq)
3652 LABEL(bfq)
3653 reg = (b & 7) | REX_B(s);
3654 val = ldq_code(s);
3655 set_reg(s, reg, val);
3656 goto insn_next;
3657 #endif
3659 LABEL(b8l) /* mov R, Iv */
3660 LABEL(b9l)
3661 LABEL(bal)
3662 LABEL(bbl)
3663 LABEL(bcl)
3664 LABEL(bdl)
3665 LABEL(bel)
3666 LABEL(bfl)
3667 reg = (b & 7) | REX_B(s);
3668 val = ldl_code(s);
3669 set_regl(s, reg, val);
3670 goto insn_next;
3672 LABEL(b8w) /* mov R, Iv */
3673 LABEL(b9w)
3674 LABEL(baw)
3675 LABEL(bbw)
3676 LABEL(bcw)
3677 LABEL(bdw)
3678 LABEL(bew)
3679 LABEL(bfw)
3680 reg = (b & 7) | REX_B(s);
3681 val = lduw_code(s);
3682 set_regw(s, reg, val);
3683 goto insn_next;
3685 LABEL(91) /* xchg R, EAX */
3686 LABEL(92)
3687 LABEL(93)
3688 LABEL(94)
3689 LABEL(95)
3690 LABEL(96)
3691 LABEL(97)
3692 ot = s->dflag + OT_WORD;
3693 reg = (b & 7) | REX_B(s);
3694 rm = R_EAX;
3695 goto do_xchg_reg;
3696 LABEL(86)
3697 LABEL(87) /* xchg Ev, Gv */
3698 if ((b & 1) == 0)
3699 ot = OT_BYTE;
3700 else
3701 ot = s->dflag + OT_WORD;
3702 modrm = ldub_code(s);
3703 reg = ((modrm >> 3) & 7) | REX_R(s);
3704 mod = (modrm >> 6) & 3;
3705 if (mod == 3) {
3706 rm = (modrm & 7) | REX_B(s);
3707 do_xchg_reg:
3708 val = get_regS(s, ot, reg);
3709 val2 = get_regS(s, ot, rm);
3710 set_regS(s, ot, rm, val);
3711 set_regS(s, ot, reg, val2);
3712 } else {
3713 /* XXX: lock for SMP */
3714 addr = get_modrm(s, modrm);
3715 val = get_regS(s, ot, reg);
3716 val2 = ldS(s, ot, addr);
3717 stS(s, ot, addr, val);
3718 set_regS(s, ot, reg, val2);
3720 goto insn_next;
3722 #define MOVZS(sgn, ot, d_ot)\
3724 /* d_ot is the size of destination */\
3725 /* ot is the size of source */\
3726 modrm = ldub_code(s);\
3727 reg = ((modrm >> 3) & 7) | REX_R(s);\
3728 mod = (modrm >> 6);\
3729 rm = (modrm & 7) | REX_B(s);\
3730 if (mod == 3) {\
3731 val = get_regS(s, ot, rm);\
3732 switch(ot | (sgn << 3)) {\
3733 case OT_BYTE:\
3734 val = (uint8_t)val;\
3735 break;\
3736 case OT_BYTE | 8:\
3737 val = (int8_t)val;\
3738 break;\
3739 case OT_WORD:\
3740 val = (uint16_t)val;\
3741 break;\
3742 default:\
3743 case OT_WORD | 8:\
3744 val = (int16_t)val;\
3745 break;\
3746 QO( case OT_LONG | 8:\
3747 val = (int32_t)val;\
3748 break;)\
3750 } else {\
3751 addr = get_modrm(s, modrm);\
3752 switch(ot | (sgn << 3)) {\
3753 case OT_BYTE:\
3754 val = ldub(s, addr);\
3755 break;\
3756 case OT_BYTE | 8:\
3757 val = (int8_t)ldub(s, addr);\
3758 break;\
3759 case OT_WORD:\
3760 val = (uint16_t)lduw(s, addr);\
3761 break;\
3762 default:\
3763 case OT_WORD | 8:\
3764 val = (int16_t)lduw(s, addr);\
3765 break;\
3766 QO( case OT_LONG | 8:\
3767 val = (int32_t)ldl(s, addr);\
3768 break;)\
3771 set_regS(s, d_ot, reg, val);\
3773 goto insn_next;
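/* MOVZS handles both movzx (sgn = 0) and movsx/movsxd (sgn = 1); the
   switch key "ot | (sgn << 3)" picks the matching zero- or sign-extending
   load, and the result is written back with the destination size d_ot. */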
3775 /* movzbS Gv, Eb */
3776 LABEL(1b6w) MOVZS(0, OT_BYTE, OT_WORD);
3777 LABEL(1b6l) MOVZS(0, OT_BYTE, OT_LONG);
3778 QO( LABEL(1b6q) MOVZS(0, OT_BYTE, OT_QUAD); )
3780 /* movzwS Gv, Ew */
3781 LABEL(1b7w) MOVZS(0, OT_WORD, OT_WORD);
3782 LABEL(1b7l) MOVZS(0, OT_WORD, OT_LONG);
3783 QO( LABEL(1b7q) MOVZS(0, OT_WORD, OT_QUAD); )
3785 /* movsbS Gv, Eb */
3786 LABEL(1bew) MOVZS(1, OT_BYTE, OT_WORD);
3787 LABEL(1bel) MOVZS(1, OT_BYTE, OT_LONG);
3788 QO( LABEL(1beq) MOVZS(1, OT_BYTE, OT_QUAD); )
3790 /* movswS Gv, Ew */
3791 LABEL(1bfw) MOVZS(1, OT_WORD, OT_WORD);
3792 LABEL(1bfl) MOVZS(1, OT_WORD, OT_LONG);
3793 QO( LABEL(1bfq) MOVZS(1, OT_WORD, OT_QUAD); )
3795 /* movslS Gv, Ed */
3796 LABEL(63w)
3797 if (!CODE64(s))
3798 goto unhandled_op;
3799 MOVZS(1, OT_LONG, OT_WORD);
3800 LABEL(63l)
3801 if (!CODE64(s))
3802 goto unhandled_op;
3803 MOVZS(1, OT_LONG, OT_LONG);
3804 QO( LABEL(63q) MOVZS(1, OT_LONG, OT_QUAD); )
3806 #define LEA(ot)\
3807 modrm = ldub_code(s);\
3808 mod = (modrm >> 6);\
3809 if (mod == 3)\
3810 goto illegal_op;\
3811 reg = ((modrm >> 3) & 7) | REX_R(s);\
3812 s->override = -2;\
3813 addr = get_modrm(s, modrm);\
3814 set_regS(s, ot, reg, addr);\
3815 goto insn_next;
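/* LEA sets override to -2 so that get_modrm() skips adding any segment
   base and returns the raw effective address. */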
3817 /* lea */
3818 LABEL(8dw) LEA(OT_WORD);
3819 LABEL(8dl) LEA(OT_LONG);
3820 QO( LABEL(8dq) LEA(OT_QUAD); )
3822 LABEL(a0) /* mov EAX, Ov */
3823 LABEL(a1)
3824 LABEL(a2) /* mov Ov, EAX */
3825 LABEL(a3)
3826 if ((b & 1) == 0)
3827 ot = OT_BYTE;
3828 else
3829 ot = s->dflag + OT_WORD;
3830 #ifdef __x86_64__
3831 if (s->aflag == 2) {
3832 addr = ldq_code(s);
3833 if (s->override == R_FS || s->override == R_GS)
3834 addr += s->cpu_state.segs[s->override].base;
3835 } else
3836 #endif
3838 int override;
3839 if (s->aflag) {
3840 addr = ldl_code(s);
3841 } else {
3842 addr = lduw_code(s);
3844 override = s->override;
3845 if (override < 0)
3846 override = R_DS;
3847 addr = (uint32_t)(addr + s->cpu_state.segs[override].base);
3849 if ((b & 2) == 0) {
3850 val = ldS(s, ot, addr);
3851 set_regS(s, ot, R_EAX, val);
3852 } else {
3853 val = get_regS(s, ot, R_EAX);
3854 stS(s, ot, addr, val);
3856 goto insn_next;
3858 /************************/
3859 /* flags */
3860 LABEL(9c) /* pushf */
3861 iopl = get_eflags_iopl(s);
3862 if (get_eflags_vm(s) && iopl != 3)
3863 raise_exception_err(s, EXCP0D_GPF, 0);
3864 val = compute_eflags(s);
3865 val &= ~(VM_MASK | RF_MASK);
3866 stack_push(s, val);
3867 goto insn_next;
3868 LABEL(9d) /* popf */
3870 long mask;
3871 iopl = get_eflags_iopl(s);
3872 if (get_eflags_vm(s) && iopl != 3)
3873 raise_exception_err(s, EXCP0D_GPF, 0);
3874 if (s->cpu_state.cpl == 0) {
3875 mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK;
3876 } else {
3877 if (s->cpu_state.cpl <= iopl) {
3878 mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK;
3879 } else {
3880 mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;
3883 if (s->dflag == 0)
3884 mask &= 0xffff;
3885 val = stack_pop(s);
3886 load_eflags(s, val, mask);
3887 stack_pop_update(s);
3889 goto insn_next;
3890 LABEL(f5) /* cmc */
3891 s->regs1.eflags ^= CC_C;
3892 goto insn_next;
3893 LABEL(f8) /* clc */
3894 s->regs1.eflags &= ~CC_C;
3895 goto insn_next;
3896 LABEL(f9) /* stc */
3897 s->regs1.eflags |= CC_C;
3898 goto insn_next;
3899 LABEL(fc) /* cld */
3900 s->regs1.eflags &= ~DF_MASK;
3901 goto insn_next;
3902 LABEL(fd) /* std */
3903 s->regs1.eflags |= DF_MASK;
3904 goto insn_next;
3906 /************************/
3907 /* bit operations */
3908 LABEL(1ba) /* bt/bts/btr/btc Gv, im */
3909 ot = s->dflag + OT_WORD;
3910 modrm = ldub_code(s);
3911 op = (modrm >> 3) & 7;
3912 if (op < 4)
3913 goto illegal_op;
3914 op -= 4;
3915 mod = (modrm >> 6);
3916 rm = (modrm & 7) | REX_B(s);
3917 if (mod != 3) {
3918 s->rip_offset = 1;
3919 addr = get_modrm(s, modrm);
3920 s->rip_offset = 0;
3921 val2 = ldub_code(s);
3922 val = ldS(s, ot, addr);
3923 eflags = s->regs1.eflags;
3924 val = exec_binary(&eflags, EB_BT + (op << 2) + ot,
3925 val, val2);
3926 if (op != 0)
3927 stS(s, ot, addr, val);
3928 s->regs1.eflags = eflags;
3929 } else {
3930 val2 = ldub_code(s);
3931 val = get_regS(s, ot, rm);
3932 val = exec_binary(&s->regs1.eflags, EB_BT + (op << 2) + ot,
3933 val, val2);
3934 if (op != 0)
3935 set_regS(s, ot, rm, val);
3937 goto insn_next;
3938 LABEL(1a3) /* bt Gv, Ev */
3939 op = 0;
3940 goto do_btx;
3941 LABEL(1ab) /* bts */
3942 op = 1;
3943 goto do_btx;
3944 LABEL(1b3) /* btr */
3945 op = 2;
3946 goto do_btx;
3947 LABEL(1bb) /* btc */
3948 op = 3;
3949 do_btx:
3950 ot = s->dflag + OT_WORD;
3951 modrm = ldub_code(s);
3952 reg = ((modrm >> 3) & 7) | REX_R(s);
3953 mod = (modrm >> 6);
3954 rm = (modrm & 7) | REX_B(s);
3955 val2 = get_regS(s, ot, reg);
3956 if (mod != 3) {
3957 addr = get_modrm(s, modrm);
3958 /* add the offset */
3959 switch(ot) {
3960 case OT_WORD:
3961 addr += ((int16_t)val2 >> 4) << 1;
3962 break;
3963 case OT_LONG:
3964 addr += ((int32_t)val2 >> 5) << 2;
3965 break;
3966 default:
3967 case OT_QUAD:
3968 addr += ((long)val2 >> 6) << 3;
3969 break;
3971 val = ldS(s, ot, addr);
3972 eflags = s->regs1.eflags;
3973 val = exec_binary(&eflags, EB_BT + (op << 2) + ot,
3974 val, val2);
3975 if (op != 0)
3976 stS(s, ot, addr, val);
3977 s->regs1.eflags = eflags;
3978 } else {
3979 val = get_regS(s, ot, rm);
3980 val = exec_binary(&s->regs1.eflags, EB_BT + (op << 2) + ot,
3981 val, val2);
3982 if (op != 0)
3983 set_regS(s, ot, rm, val);
3985 goto insn_next;
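/* For memory operands the bit offset in val2 may address bits outside the
   word at addr, so the address is first displaced by the (signed) bit
   offset divided by the operand width, matching the x86 bit-string
   semantics of bt/bts/btr/btc with a register offset. */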
3986 LABEL(1bc) /* bsf */
3987 LABEL(1bd) /* bsr */
3988 ot = s->dflag + OT_WORD;
3989 modrm = ldub_code(s);
3990 mod = (modrm >> 6);
3991 rm = (modrm & 7) | REX_B(s);
3992 reg = ((modrm >> 3) & 7) | REX_R(s);
3993 val = get_regS(s, ot, reg);
3994 if (mod != 3) {
3995 addr = get_modrm(s, modrm);
3996 val2 = ldS(s, ot, addr);
3997 } else {
3998 val2 = get_regS(s, ot, rm);
4000 op = b & 1;
4001 val = exec_binary(&s->regs1.eflags, EB_BSF + (op << 2) + ot,
4002 val, val2);
4003 set_regS(s, ot, reg, val);
4004 goto insn_next;
4006 /************************/
4007 /* control */
4008 LABEL(c2) /* ret im */
4010 long addend;
4011 addend = (int16_t)lduw_code(s);
4012 val = stack_pop(s);
4013 if (s->dflag == 0)
4014 val &= 0xffff;
4015 if (CODE64(s) && s->dflag)
4016 s->dflag = 2;
4017 sp_add(s, addend + (2 << s->dflag));
4018 pc = val;
4020 goto insn_next;
4021 LABEL(c3) /* ret */
4022 val = stack_pop(s);
4023 if (s->dflag == 0)
4024 val &= 0xffff;
4025 stack_pop_update(s);
4026 pc = val;
4027 goto insn_next;
4028 LABEL(ca) /* lret im */
4029 val = (int16_t)lduw_code(s);
4030 helper_lret_protected(s, s->dflag, val);
4031 goto ljmp_op;
4032 LABEL(cb) /* lret */
4033 helper_lret_protected(s, s->dflag, 0);
4034 goto ljmp_op;
4035 LABEL(cf) /* iret */
4036 helper_iret_protected(s, s->dflag);
4037 goto ljmp_op;
4038 LABEL(e8) /* call im */
4039 if (s->dflag)
4040 val = (int32_t)ldl_code(s);
4041 else
4042 val = (int16_t)lduw_code(s);
4043 next_eip = pc;
4044 val += next_eip;
4045 if (s->dflag == 0)
4046 val &= 0xffff;
4047 stack_push(s, next_eip);
4048 pc = val;
4049 goto insn_next;
4050 LABEL(e9) /* jmp im */
4051 if (s->dflag)
4052 val = (int32_t)ldl_code(s);
4053 else
4054 val = (int16_t)lduw_code(s);
4055 do_jmp:
4056 next_eip = pc;
4057 val += next_eip;
4058 if (s->dflag == 0)
4059 val &= 0xffff;
4060 pc = val;
4061 goto insn_next;
4062 LABEL(eb) /* jmp Jb */
4063 val = (int8_t)ldub_code(s);
4064 goto do_jmp;
4066 #define JCC(ot, v)\
4068 if (ot == OT_BYTE)\
4069 val = (int8_t)ldub_code(s);\
4070 else if (ot == OT_WORD)\
4071 val = (int16_t)lduw_code(s);\
4072 else\
4073 val = (int32_t)ldl_code(s);\
4074 if (get_jcc_cond(s->regs1.eflags, v))\
4075 goto do_jmp;\
4076 goto insn_next;\
4078 /* jcc Jb */
4080 LABEL(70) JCC(OT_BYTE, 0x0)
4081 LABEL(71) JCC(OT_BYTE, 0x1)
4082 LABEL(72) JCC(OT_BYTE, 0x2)
4083 LABEL(73) JCC(OT_BYTE, 0x3)
4084 LABEL(74) JCC(OT_BYTE, 0x4)
4085 LABEL(75) JCC(OT_BYTE, 0x5)
4086 LABEL(76) JCC(OT_BYTE, 0x6)
4087 LABEL(77) JCC(OT_BYTE, 0x7)
4088 LABEL(78) JCC(OT_BYTE, 0x8)
4089 LABEL(79) JCC(OT_BYTE, 0x9)
4090 LABEL(7a) JCC(OT_BYTE, 0xa)
4091 LABEL(7b) JCC(OT_BYTE, 0xb)
4092 LABEL(7c) JCC(OT_BYTE, 0xc)
4093 LABEL(7d) JCC(OT_BYTE, 0xd)
4094 LABEL(7e) JCC(OT_BYTE, 0xe)
4095 LABEL(7f) JCC(OT_BYTE, 0xf)
4097 /* jcc Jv */
4098 LABEL(180w) JCC(OT_WORD, 0x0)
4099 LABEL(181w) JCC(OT_WORD, 0x1)
4100 LABEL(182w) JCC(OT_WORD, 0x2)
4101 LABEL(183w) JCC(OT_WORD, 0x3)
4102 LABEL(184w) JCC(OT_WORD, 0x4)
4103 LABEL(185w) JCC(OT_WORD, 0x5)
4104 LABEL(186w) JCC(OT_WORD, 0x6)
4105 LABEL(187w) JCC(OT_WORD, 0x7)
4106 LABEL(188w) JCC(OT_WORD, 0x8)
4107 LABEL(189w) JCC(OT_WORD, 0x9)
4108 LABEL(18aw) JCC(OT_WORD, 0xa)
4109 LABEL(18bw) JCC(OT_WORD, 0xb)
4110 LABEL(18cw) JCC(OT_WORD, 0xc)
4111 LABEL(18dw) JCC(OT_WORD, 0xd)
4112 LABEL(18ew) JCC(OT_WORD, 0xe)
4113 LABEL(18fw) JCC(OT_WORD, 0xf)
4116 QO(LABEL(180q)) LABEL(180l) JCC(OT_LONG, 0x0)
4117 QO(LABEL(181q)) LABEL(181l) JCC(OT_LONG, 0x1)
4118 QO(LABEL(182q)) LABEL(182l) JCC(OT_LONG, 0x2)
4119 QO(LABEL(183q)) LABEL(183l) JCC(OT_LONG, 0x3)
4120 QO(LABEL(184q)) LABEL(184l) JCC(OT_LONG, 0x4)
4121 QO(LABEL(185q)) LABEL(185l) JCC(OT_LONG, 0x5)
4122 QO(LABEL(186q)) LABEL(186l) JCC(OT_LONG, 0x6)
4123 QO(LABEL(187q)) LABEL(187l) JCC(OT_LONG, 0x7)
4124 QO(LABEL(188q)) LABEL(188l) JCC(OT_LONG, 0x8)
4125 QO(LABEL(189q)) LABEL(189l) JCC(OT_LONG, 0x9)
4126 QO(LABEL(18aq)) LABEL(18al) JCC(OT_LONG, 0xa)
4127 QO(LABEL(18bq)) LABEL(18bl) JCC(OT_LONG, 0xb)
4128 QO(LABEL(18cq)) LABEL(18cl) JCC(OT_LONG, 0xc)
4129 QO(LABEL(18dq)) LABEL(18dl) JCC(OT_LONG, 0xd)
4130 QO(LABEL(18eq)) LABEL(18el) JCC(OT_LONG, 0xe)
4131 QO(LABEL(18fq)) LABEL(18fl) JCC(OT_LONG, 0xf)
4133 LABEL(190) /* setcc Gv */
4134 LABEL(191)
4135 LABEL(192)
4136 LABEL(193)
4137 LABEL(194)
4138 LABEL(195)
4139 LABEL(196)
4140 LABEL(197)
4141 LABEL(198)
4142 LABEL(199)
4143 LABEL(19a)
4144 LABEL(19b)
4145 LABEL(19c)
4146 LABEL(19d)
4147 LABEL(19e)
4148 LABEL(19f)
4149 modrm = ldub_code(s);
4150 mod = (modrm >> 6);
4151 val = (get_jcc_cond(s->regs1.eflags, b & 0xf) != 0);
4152 if (mod != 3) {
4153 addr = get_modrm(s, modrm);
4154 stS(s, OT_BYTE, addr, val);
4155 } else {
4156 rm = (modrm & 7) | REX_B(s);
4157 set_regS(s, OT_BYTE, rm, val);
4159 goto insn_next;
4160 LABEL(140) /* cmov Gv, Ev */
4161 LABEL(141)
4162 LABEL(142)
4163 LABEL(143)
4164 LABEL(144)
4165 LABEL(145)
4166 LABEL(146)
4167 LABEL(147)
4168 LABEL(148)
4169 LABEL(149)
4170 LABEL(14a)
4171 LABEL(14b)
4172 LABEL(14c)
4173 LABEL(14d)
4174 LABEL(14e)
4175 LABEL(14f)
4176 ot = s->dflag + OT_WORD;
4177 modrm = ldub_code(s);
4178 mod = (modrm >> 6);
4179 reg = ((modrm >> 3) & 7) | REX_R(s);
4180 if (mod != 3) {
4181 addr = get_modrm(s, modrm);
4182 val = ldS(s, ot, addr);
4183 } else {
4184 rm = (modrm & 7) | REX_B(s);
4185 val = get_regS(s, ot, rm);
4187 if (get_jcc_cond(s->regs1.eflags, b & 0xf)) {
4188 set_regS(s, ot, reg, val);
4190 goto insn_next;
4192 LABEL(c4) /* les Gv */
4193 op = R_ES;
4194 goto do_lxx;
4195 LABEL(c5) /* lds Gv */
4196 op = R_DS;
4197 goto do_lxx;
4198 LABEL(1b2) /* lss Gv */
4199 op = R_SS;
4200 goto do_lxx;
4201 LABEL(1b4) /* lfs Gv */
4202 op = R_FS;
4203 goto do_lxx;
4204 LABEL(1b5) /* lgs Gv */
4205 op = R_GS;
4206 do_lxx:
4207 modrm = ldub_code(s);
4208 reg = ((modrm >> 3) & 7);
4209 mod = (modrm >> 6);
4210 if (mod == 3)
4211 goto illegal_op;
4212 addr = get_modrm(s, modrm);
4213 if (s->dflag) {
4214 val = ldl(s, addr);
4215 addr += 4;
4216 } else {
4217 val = lduw(s, addr);
4218 addr += 2;
4220 sel = lduw(s, addr);
4221 load_seg_desc(s, op, sel);
4222 if (s->dflag)
4223 set_regl(s, reg, val);
4224 else
4225 set_regw(s, reg, val);
4226 goto insn_next;
4228 /************************/
4229 /* shifts */
4230 LABEL(c0)
4231 LABEL(c1)
4232 /* shift Ev,Ib */
4233 if ((b & 1) == 0)
4234 ot = OT_BYTE;
4235 else
4236 ot = s->dflag + OT_WORD;
4238 modrm = ldub_code(s);
4239 mod = (modrm >> 6);
4240 op = (modrm >> 3) & 7;
4241 if (mod != 3) {
4242 s->rip_offset = 1;
4243 addr = get_modrm(s, modrm);
4244 s->rip_offset = 0;
4245 val = ldS(s, ot, addr);
4246 val2 = ldub_code(s);
4247 eflags = s->regs1.eflags;
4248 val = exec_binary(&eflags,
4249 EB_ROL + (op << 2) + ot,
4250 val, val2);
4251 stS(s, ot, addr, val);
4252 s->regs1.eflags = eflags;
4253 } else {
4254 rm = (modrm & 7) | REX_B(s);
4255 val = get_regS(s, ot, rm);
4256 val2 = ldub_code(s);
4257 val = exec_binary(&s->regs1.eflags,
4258 EB_ROL + (op << 2) + ot,
4259 val, val2);
4260 set_regS(s, ot, rm, val);
4262 goto insn_next;
4263 LABEL(d0)
4264 LABEL(d1)
4265 /* shift Ev,1 */
4266 val2 = 1;
4267 grp2:
4268 if ((b & 1) == 0)
4269 ot = OT_BYTE;
4270 else
4271 ot = s->dflag + OT_WORD;
4273 modrm = ldub_code(s);
4274 mod = (modrm >> 6);
4275 op = (modrm >> 3) & 7;
4276 if (mod != 3) {
4277 addr = get_modrm(s, modrm);
4278 val = ldS(s, ot, addr);
4279 eflags = s->regs1.eflags;
4280 val = exec_binary(&eflags,
4281 EB_ROL + (op << 2) + ot,
4282 val, val2);
4283 stS(s, ot, addr, val);
4284 s->regs1.eflags = eflags;
4285 } else {
4286 rm = (modrm & 7) | REX_B(s);
4287 val = get_regS(s, ot, rm);
4288 val = exec_binary(&s->regs1.eflags,
4289 EB_ROL + (op << 2) + ot,
4290 val, val2);
4291 set_regS(s, ot, rm, val);
4293 goto insn_next;
4294 LABEL(d2)
4295 LABEL(d3)
4296 /* shift Ev,cl */
4297 val2 = s->regs1.ecx;
4298 goto grp2;
4300 #ifdef __x86_64__
4301 #define SHIFTD1(op, eflags, val, val2, shift) \
4302 asm volatile( op "\n"\
4303 "pushf\n"\
4304 "pop %%rax\n"\
4305 "andl $0x8d5, %%eax\n"\
4306 "andl $~0x8d5, %%ebx\n"\
4307 "orl %%eax, %%ebx\n"\
4308 : "=b" (eflags),\
4309 "=r" (val)\
4310 : "0" (eflags),\
4311 "1" (val),\
4312 "r" (val2),\
4313 "c" (shift)\
4314 : "%eax");
4315 #else
4316 #define SHIFTD1(op, eflags, val, val2, shift) \
4317 asm volatile( op "\n"\
4318 LAHF "\n"\
4319 "seto %%al\n"\
4320 "movb %%ah, %%bl\n"\
4321 "shll $3, %%eax\n"\
4322 "andl $~0x0800, %%ebx\n"\
4323 "orb %%al, %%bh\n"\
4324 : "=b" (eflags),\
4325 "=r" (val)\
4326 : "0" (eflags),\
4327 "1" (val),\
4328 "r" (val2),\
4329 "c" (shift)\
4330 : "%eax");
4331 #endif
4333 #define SHIFTD(eflags, op, val, val2, shift) \
4334 switch(op) {\
4335 case 1: SHIFTD1("shld %%cl, %w4, %w1", eflags, val, val2, shift); break;\
4336 case 2: SHIFTD1("shld %%cl, %k4, %k1", eflags, val, val2, shift); break;\
4337 QO(case 3: SHIFTD1("shld %%cl, %4, %1", eflags, val, val2, shift); break;)\
4338 case 5: SHIFTD1("shrd %%cl, %w4, %w1", eflags, val, val2, shift); break;\
4339 case 6: SHIFTD1("shrd %%cl, %k4, %k1", eflags, val, val2, shift); break;\
4340 QO(case 7: SHIFTD1("shrd %%cl, %4, %1", eflags, val, val2, shift); break;)\
4343 LABEL(1a4) /* shld imm */
4344 op = 0;
4345 goto do_shiftd_imm;
4346 LABEL(1ac) /* shrd imm */
4347 op = 1;
4348 do_shiftd_imm:
4350 long shift;
4351 ot = s->dflag + OT_WORD;
4352 modrm = ldub_code(s);
4353 mod = (modrm >> 6);
4354 reg = ((modrm >> 3) & 7) | REX_R(s);
4355 val2 = get_regS(s, ot, reg);
4356 if (mod != 3) {
4357 s->rip_offset = 1;
4358 addr = get_modrm(s, modrm);
4359 s->rip_offset = 0;
4360 val = ldS(s, ot, addr);
4361 shift = ldub_code(s);
4362 if (ot == OT_QUAD)
4363 shift &= 0x3f;
4364 else
4365 shift &= 0x1f;
4366 eflags = s->regs1.eflags;
4367 if (shift != 0) {
4368 SHIFTD(eflags, (op << 2) + ot, val, val2, shift);
4370 stS(s, ot, addr, val);
4371 s->regs1.eflags = eflags;
4372 } else {
4373 rm = (modrm & 7) | REX_B(s);
4374 val = get_regS(s, ot, rm);
4375 shift = ldub_code(s);
4376 if (ot == OT_QUAD)
4377 shift &= 0x3f;
4378 else
4379 shift &= 0x1f;
4380 if (shift != 0) {
4381 SHIFTD(s->regs1.eflags, (op << 2) + ot, val, val2, shift);
4383 set_regS(s, ot, rm, val);
4386 goto insn_next;
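/* SHIFTD runs the host shld/shrd and recovers the status flags into the
   guest eflags (through %ebx here rather than %ecx, since %cl holds the
   shift count).  The count is pre-masked to 5 bits (6 for 64-bit
   operands), mirroring the hardware masking, and a masked count of 0
   leaves both the destination and the flags unchanged. */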
4388 LABEL(1a5) /* shld cl */
4389 op = 0;
4390 goto do_shiftd;
4391 LABEL(1ad) /* shrd cl */
4392 op = 1;
4393 do_shiftd:
4395 long shift;
4396 ot = s->dflag + OT_WORD;
4397 modrm = ldub_code(s);
4398 mod = (modrm >> 6);
4399 reg = ((modrm >> 3) & 7) | REX_R(s);
4400 val2 = get_regS(s, s->dflag + OT_WORD, reg);
4401 shift = s->regs1.ecx;
4402 if (ot == OT_QUAD)
4403 shift &= 0x3f;
4404 else
4405 shift &= 0x1f;
4406 if (mod != 3) {
4407 addr = get_modrm(s, modrm);
4408 val = ldS(s, ot, addr);
4409 eflags = s->regs1.eflags;
4410 if (shift != 0) {
4411 SHIFTD(eflags, (op << 2) + ot, val, val2, shift);
4413 stS(s, ot, addr, val);
4414 s->regs1.eflags = eflags;
4415 } else {
4416 rm = (modrm & 7) | REX_B(s);
4417 val = get_regS(s, ot, rm);
4418 if (shift != 0) {
4419 SHIFTD(s->regs1.eflags, (op << 2) + ot, val, val2, shift);
4421 set_regS(s, ot, rm, val);
4424 goto insn_next;
4426 LABEL(cd) /* int N */
4427 val = ldub_code(s);
4428 do_int(s, val);
4429 goto ljmp_op;
4430 LABEL(f4) /* hlt */
4431 if (s->cpu_state.cpl != 0)
4432 raise_exception_err(s, EXCP0D_GPF, 0);
4433 raise_exception(s, KQEMU_RET_SOFTMMU);
4434 goto insn_next;
4435 LABEL(100)
4436 modrm = ldub_code(s);
4437 mod = (modrm >> 6);
4438 op = (modrm >> 3) & 7;
4439 switch(op) {
4440 case 0: /* sldt */
4441 if (!(s->cpu_state.cr0 & CR0_PE_MASK) || get_eflags_vm(s))
4442 goto illegal_op;
4443 raise_exception(s, KQEMU_RET_SOFTMMU);
4444 break;
4445 case 2: /* lldt */
4446 if (!(s->cpu_state.cr0 & CR0_PE_MASK) || get_eflags_vm(s))
4447 goto illegal_op;
4448 if (s->cpu_state.cpl != 0)
4449 raise_exception_err(s, EXCP0D_GPF, 0);
4450 #ifdef USE_SEG_GP
4451 if (mod == 3) {
4452 rm = (modrm & 7) | REX_B(s);
4453 val = get_regS(s, OT_WORD, rm) & 0xffff;
4454 } else {
4455 addr = get_modrm(s, modrm);
4456 val = ldS(s, OT_WORD, addr);
4458 helper_lldt(s, val);
4459 #else
4460 raise_exception(s, KQEMU_RET_SOFTMMU);
4461 #endif
4462 break;
4463 case 1: /* str */
4464 if (!(s->cpu_state.cr0 & CR0_PE_MASK) || get_eflags_vm(s))
4465 goto illegal_op;
4466 raise_exception(s, KQEMU_RET_SOFTMMU);
4467 break;
4468 case 3: /* ltr */
4469 if (!(s->cpu_state.cr0 & CR0_PE_MASK) || get_eflags_vm(s))
4470 goto illegal_op;
4471 if (s->cpu_state.cpl != 0)
4472 raise_exception_err(s, EXCP0D_GPF, 0);
4473 raise_exception(s, KQEMU_RET_SOFTMMU);
4474 break;
4475 case 4: /* verr */
4476 case 5: /* verw */
4477 if (!(s->cpu_state.cr0 & CR0_PE_MASK) || get_eflags_vm(s))
4478 goto illegal_op;
4479 raise_exception(s, KQEMU_RET_SOFTMMU);
4480 break;
4481 default:
4482 goto illegal_op;
4484 goto insn_next;
4485 LABEL(101)
4486 modrm = ldub_code(s);
4487 mod = (modrm >> 6);
4488 op = (modrm >> 3) & 7;
4489 switch(op) {
4490 case 0: /* sgdt */
4491 if (mod == 3)
4492 goto illegal_op;
4493 raise_exception(s, KQEMU_RET_SOFTMMU);
4494 case 1:
4495 if (mod == 3) {
4496 rm = modrm & 7;
4497 switch(rm) {
4498 case 0: /* monitor */
4499 if (/* !(s->cpuid_ext_features & CPUID_EXT_MONITOR) || */
4500 s->cpu_state.cpl != 0)
4501 goto illegal_op;
4502 if ((uint32_t)s->regs1.ecx != 0)
4503 raise_exception_err(s, EXCP0D_GPF, 0);
4504 break;
4505 default:
4506 goto illegal_op;
4507 }
4508 } else {
4509 /* sidt */
4510 raise_exception(s, KQEMU_RET_SOFTMMU);
4511 }
4512 break;
4513 case 2: /* lgdt */
4514 case 3: /* lidt */
4515 if (mod == 3)
4516 goto illegal_op;
4517 if (s->cpu_state.cpl != 0)
4518 raise_exception_err(s, EXCP0D_GPF, 0);
4519 raise_exception(s, KQEMU_RET_SOFTMMU);
4520 case 4: /* smsw */
4521 raise_exception(s, KQEMU_RET_SOFTMMU);
4522 case 6: /* lmsw */
4523 if (s->cpu_state.cpl != 0)
4524 raise_exception_err(s, EXCP0D_GPF, 0);
4525 raise_exception(s, KQEMU_RET_SOFTMMU);
4526 case 7: /* invlpg/swapgs */
4527 if (s->cpu_state.cpl != 0)
4528 raise_exception_err(s, EXCP0D_GPF, 0);
4529 if (mod == 3) {
4530 #ifdef __x86_64__
4531 if (CODE64(s) && (modrm & 7) == 0) {
4532 helper_swapgs(s);
4533 } else
4534 #endif
4535 {
4536 goto illegal_op;
4537 }
4538 } else {
4539 addr = get_modrm(s, modrm);
4540 do_invlpg(s, addr);
4541 }
4542 break;
4543 default:
4544 goto illegal_op;
4545 }
4546 goto insn_next;
4547 LABEL(108) /* invd */
4548 LABEL(109) /* wbinvd */
4549 if (s->cpu_state.cpl != 0)
4550 raise_exception_err(s, EXCP0D_GPF, 0);
4551 goto insn_next;
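/* mov to/from debug registers: reads come from the shadow values in
   cpu_state; writes are passed back to QEMU, except a DR7 write that
   does not change the current value. */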
4552 LABEL(121) /* mov reg, drN */
4553 LABEL(123) /* mov drN, reg */
4554 if (s->cpu_state.cpl != 0)
4555 raise_exception_err(s, EXCP0D_GPF, 0);
4556 modrm = ldub_code(s);
4557 if ((modrm & 0xc0) != 0xc0)
4558 goto illegal_op;
4559 rm = (modrm & 7) | REX_B(s);
4560 reg = ((modrm >> 3) & 7) | REX_R(s);
4561 if (CODE64(s))
4562 ot = OT_QUAD;
4563 else
4564 ot = OT_LONG;
4565 if (b & 2) {
4566 val = get_reg(s, rm);
4567 if (ot == OT_LONG)
4568 val = (uint32_t)val;
4569 switch(reg) {
4570 case 0:
4571 case 1:
4572 case 2:
4573 case 3:
4574 case 6:
4575 raise_exception(s, KQEMU_RET_SOFTMMU);
4576 case 7:
4577 /* better than nothing: ignore the write if it does not change DR7 */
4578 if (val != s->cpu_state.dr7)
4579 raise_exception(s, KQEMU_RET_SOFTMMU);
4580 break;
4581 default:
4582 goto illegal_op;
4583 }
4584 } else {
4585 switch(reg) {
4586 case 0:
4587 val = s->cpu_state.dr0;
4588 break;
4589 case 1:
4590 val = s->cpu_state.dr1;
4591 break;
4592 case 2:
4593 val = s->cpu_state.dr2;
4594 break;
4595 case 3:
4596 val = s->cpu_state.dr3;
4597 break;
4598 case 6:
4599 val = s->cpu_state.dr6;
4600 break;
4601 case 7:
4602 val = s->cpu_state.dr7;
4603 break;
4604 default:
4605 goto illegal_op;
4606 }
4607 set_regS(s, ot, rm, val);
4608 }
4609 goto insn_next;
4610 LABEL(106) /* clts */
4611 if (s->cpu_state.cpl != 0)
4612 raise_exception_err(s, EXCP0D_GPF, 0);
4613 do_update_cr0(s, s->cpu_state.cr0 & ~CR0_TS_MASK);
4614 goto insn_next;
4616 LABEL(118)
4617 modrm = ldub_code(s);
4618 mod = (modrm >> 6);
4619 op = (modrm >> 3) & 7;
4620 switch(op) {
4621 case 0: /* prefetchnta */
4622 case 1: /* prefetcht0 */
4623 case 2: /* prefetcht1 */
4624 case 3: /* prefetcht2 */
4625 if (mod == 3)
4626 goto illegal_op;
4627 addr = get_modrm(s, modrm);
4628 /* nothing more to do */
4629 break;
4630 default: /* nop (multi byte) */
4631 addr = get_modrm(s, modrm);
4632 break;
4633 }
4634 goto insn_next;
4636 LABEL(119) /* nop (multi byte) */
4637 LABEL(11a)
4638 LABEL(11b)
4639 LABEL(11c)
4640 LABEL(11d)
4641 LABEL(11e)
4642 LABEL(11f)
4643 modrm = ldub_code(s);
4644 addr = get_modrm(s, modrm);
4645 goto insn_next;
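/* mov to/from control registers: CR0/CR3/CR4 writes go through the
   do_update_cr* helpers, reads use the shadow values in cpu_state,
   and the remaining cases are passed back to QEMU. */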
4647 LABEL(120) /* mov reg, crN */
4648 LABEL(122) /* mov crN, reg */
4649 if (s->cpu_state.cpl != 0)
4650 raise_exception_err(s, EXCP0D_GPF, 0);
4651 modrm = ldub_code(s);
4652 if ((modrm & 0xc0) != 0xc0)
4653 goto illegal_op;
4654 rm = (modrm & 7) | REX_B(s);
4655 reg = ((modrm >> 3) & 7) | REX_R(s);
4656 if (b & 2) {
4657 val = get_reg(s, rm);
4658 switch(reg) {
4659 case 0:
4660 do_update_cr0(s, val);
4661 break;
4662 case 3:
4663 do_update_cr3(s, val);
4664 break;
4665 case 4:
4666 do_update_cr4(s, val);
4667 break;
4668 case 2:
4669 case 8:
4670 raise_exception(s, KQEMU_RET_SOFTMMU);
4671 default:
4672 goto illegal_op;
4673 }
4674 } else {
4675 switch(reg) {
4676 case 0:
4677 set_reg(s, rm, s->cpu_state.cr0);
4678 break;
4679 case 2:
4680 set_reg(s, rm, s->cpu_state.cr2);
4681 break;
4682 case 3:
4683 set_reg(s, rm, s->cpu_state.cr3);
4684 break;
4685 case 4:
4686 set_reg(s, rm, s->cpu_state.cr4);
4687 break;
4688 case 8:
4689 raise_exception(s, KQEMU_RET_SOFTMMU);
4690 default:
4691 goto illegal_op;
4692 }
4693 }
4694 goto insn_next;
4695 LABEL(130) /* wrmsr */
4696 if (s->cpu_state.cpl != 0)
4697 raise_exception_err(s, EXCP0D_GPF, 0);
4698 helper_wrmsr(s);
4699 goto insn_next;
4700 LABEL(132) /* rdmsr */
4701 if (s->cpu_state.cpl != 0)
4702 raise_exception_err(s, EXCP0D_GPF, 0);
4703 helper_rdmsr(s);
4704 goto insn_next;
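/* cli/sti are only permitted when CPL <= IOPL, otherwise #GP is
   raised as on real hardware. */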
4705 LABEL(fa) /* cli */
4706 iopl = get_eflags_iopl(s);
4707 if (likely(s->cpu_state.cpl <= iopl)) {
4708 set_reset_eflags(s, 0, IF_MASK);
4709 } else {
4710 raise_exception_err(s, EXCP0D_GPF, 0);
4711 }
4712 goto insn_next;
4713 LABEL(fb) /* sti */
4714 iopl = get_eflags_iopl(s);
4715 if (likely(s->cpu_state.cpl <= iopl)) {
4716 set_reset_eflags(s, IF_MASK, 0);
4717 } else {
4718 raise_exception_err(s, EXCP0D_GPF, 0);
4719 }
4720 /* NOTE: interrupts should remain disabled for the instruction
4721 following STI. As it would be too complicated to ensure this
4722 in general, we handle the "sti ; sysexit" sequence found in
4723 XP specifically (see the check for 0x0f 0x35 below). XXX: see
4724 why we cannot execute the next insn in every case. */
4725 val = lduw_mem_fast(s, pc + s->cpu_state.segs[R_CS].base);
4726 if (val == 0x350f) {
4727 /* sysexit */
4728 s->regs1.eip = pc;
4729 goto insn_next3;
4730 } else {
4731 goto insn_next;
4732 }
4734 LABEL(90) /* nop */
4735 goto insn_next;
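/* rdtsc is executed natively on the host once the CR4.TSD
   restriction has been checked. */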
4736 LABEL(131) /* rdtsc */
4737 {
4738 uint32_t low, high;
4739 if ((s->cpu_state.cr4 & CR4_TSD_MASK) &&
4740 s->cpu_state.cpl != 0) {
4741 raise_exception_err(s, EXCP0D_GPF, 0);
4742 }
4743 asm volatile("rdtsc" : "=a" (low), "=d" (high));
4744 s->regs1.eax = low;
4745 s->regs1.edx = high;
4746 }
4747 goto insn_next;
4749 LABEL(105) /* syscall */
4750 helper_syscall(s);
4751 goto ljmp_op;
4753 LABEL(107) /* sysret */
4754 helper_sysret(s);
4755 goto ljmp_op;
4757 LABEL(134) /* sysenter */
4758 if (CODE64(s))
4759 goto illegal_op;
4760 helper_sysenter(s);
4761 goto ljmp_op;
4763 LABEL(135) /* sysexit */
4764 if (CODE64(s))
4765 goto illegal_op;
4766 helper_sysexit(s);
4767 goto ljmp_op;
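/* far calls/jumps and all I/O instructions are not interpreted here:
   they are passed back to QEMU. */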
4769 LABEL(9a) /* lcall im */
4770 LABEL(ea) /* ljmp im */
4772 LABEL(e4) /* in im */
4773 LABEL(e5)
4774 LABEL(e6) /* out im */
4775 LABEL(e7)
4776 LABEL(ec) /* in dx */
4777 LABEL(ed)
4778 LABEL(ee) /* out dx */
4779 LABEL(ef)
4780 LABEL(6c) /* insS */
4781 LABEL(6d)
4782 LABEL(6e) /* outS */
4783 LABEL(6f)
4784 raise_exception(s, KQEMU_RET_SOFTMMU);
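/* movs: one element is copied per pass. With a REP prefix, ECX is
   decremented and pc is reset to the start of the instruction so it
   is dispatched again until the count reaches zero. */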
4786 LABEL(a4) /* movs */
4787 LABEL(a5)
4788 {
4789 unsigned long saddr, daddr, incr, mask;
4790 int override;
4792 if ((b & 1) == 0)
4793 ot = OT_BYTE;
4794 else
4795 ot = s->dflag + OT_WORD;
4797 if (s->aflag == 2)
4798 mask = -1;
4799 else if (s->aflag)
4800 mask = 0xffffffff;
4801 else
4802 mask = 0xffff;
4803 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
4804 if ((s->regs1.ecx & mask) == 0)
4805 goto insn_next;
4806 }
4808 incr = (1 - (2 * ((s->regs1.eflags >> 10) & 1))) << ot;
4809 #ifdef __x86_64__
4810 if (s->aflag == 2) {
4811 saddr = s->regs1.esi;
4812 if (s->override == R_FS || s->override == R_GS)
4813 saddr += s->cpu_state.segs[s->override].base;
4815 daddr = s->regs1.edi;
4817 s->regs1.esi += incr;
4818 s->regs1.edi += incr;
4819 } else
4820 #endif
4821 {
4822 saddr = s->regs1.esi & mask;
4823 override = s->override;
4824 if (override < 0)
4825 override = R_DS;
4826 saddr = (uint32_t)(saddr + s->cpu_state.segs[override].base);
4828 daddr = s->regs1.edi & mask;
4829 daddr = (uint32_t)(daddr + s->cpu_state.segs[R_ES].base);
4831 val = s->regs1.esi + incr;
4832 s->regs1.esi = (s->regs1.esi & ~mask) | (val & mask);
4833 val = s->regs1.edi + incr;
4834 s->regs1.edi = (s->regs1.edi & ~mask) | (val & mask);
4835 }
4836 val = ldS(s, ot, saddr);
4837 stS(s, ot, daddr, val);
4839 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
4840 val = s->regs1.ecx - 1;
4841 s->regs1.ecx = (s->regs1.ecx & ~mask) | (val & mask);
4842 pc = s->regs1.eip;
4843 }
4844 }
4845 goto insn_next;
4847 LABEL(98) /* cbw/cwde/cdqe */
4848 #ifdef __x86_64__
4849 if (s->dflag == 2) {
4850 s->regs1.eax = (int32_t)s->regs1.eax;
4851 } else
4852 #endif
4853 if (s->dflag) {
4854 s->regs1.eax = (uint32_t)((int16_t)s->regs1.eax);
4855 } else {
4856 s->regs1.eax = (s->regs1.eax & ~0xffff) |
4857 ((int8_t)s->regs1.eax & 0xffff);
4858 }
4859 goto insn_next;
4861 LABEL(99) /* cwd/cdq/cqo */
4862 #ifdef __x86_64__
4863 if (s->dflag == 2) {
4864 s->regs1.edx = (int64_t)s->regs1.eax >> 63;
4865 } else
4866 #endif
4867 if (s->dflag) {
4868 s->regs1.edx = (uint32_t)((int32_t)s->regs1.eax >> 31);
4869 } else {
4870 s->regs1.edx = (s->regs1.edx & ~0xffff) |
4871 (((int16_t)s->regs1.eax >> 15) & 0xffff);
4872 }
4873 goto insn_next;
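/* xadd: computed with exec_binary(EB_ADD); for a memory destination
   the flags are updated on a local copy and written back together
   with the result. */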
4875 LABEL(1c0) /* xadd */
4876 LABEL(1c1)
4877 if ((b & 1) == 0)
4878 ot = OT_BYTE;
4879 else
4880 ot = s->dflag + OT_WORD;
4882 modrm = ldub_code(s);
4883 mod = (modrm >> 6);
4884 reg = ((modrm >> 3) & 7) | REX_R(s);
4885 val = get_regS(s, ot, reg);
4886 if (mod == 3) {
4887 rm = (modrm & 7) | REX_B(s);
4888 val2 = get_regS(s, ot, rm);
4889 val = exec_binary(&s->regs1.eflags,
4890 EB_ADD + ot,
4891 val, val2);
4892 set_regS(s, ot, rm, val);
4893 set_regS(s, ot, reg, val2);
4894 } else {
4895 addr = get_modrm(s, modrm);
4896 val2 = ldS(s, ot, addr);
4897 eflags = s->regs1.eflags;
4898 val = exec_binary(&eflags,
4899 EB_ADD + ot,
4900 val, val2);
4901 stS(s, ot, addr, val);
4902 set_regS(s, ot, reg, val2);
4903 s->regs1.eflags = eflags;
4904 }
4905 goto insn_next;
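/* 0f ae group: fxsave/fxrstor are executed natively, but only after
   checking that the 512 byte FXSAVE area cannot overlap the monitor
   memory; MMU_EXCEPTION supplies the fixup used if the access
   faults. */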
4906 LABEL(1ae)
4907 modrm = ldub_code(s);
4908 mod = (modrm >> 6);
4909 op = (modrm >> 3) & 7;
4910 switch(op) {
4911 case 0: /* fxsave */
4912 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR))
4913 goto illegal_op;
4914 addr = get_modrm(s, modrm);
4915 if (unlikely((addr - ((unsigned long)&_start - 511)) <
4916 (MONITOR_MEM_SIZE + 511)))
4917 raise_exception(s, KQEMU_RET_SOFTMMU);
4918 #ifdef __x86_64__
4919 if (s->dflag == 2) {
4920 asm volatile("1:\n"
4921 "rex64 ; fxsave (%0)\n"
4922 MMU_EXCEPTION(1b)
4923 : : "r" (addr) : "memory");
4924 } else
4925 #endif
4926 {
4927 asm volatile("1:\n"
4928 "fxsave (%0)\n"
4929 MMU_EXCEPTION(1b)
4930 : : "r" (addr) : "memory");
4931 }
4932 break;
4933 case 1: /* fxrstor */
4934 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR))
4935 goto illegal_op;
4936 addr = get_modrm(s, modrm);
4937 if (unlikely((addr - ((unsigned long)&_start - 511)) <
4938 (MONITOR_MEM_SIZE + 511)))
4939 raise_exception(s, KQEMU_RET_SOFTMMU);
4940 #ifdef __x86_64__
4941 if (s->dflag == 2) {
4942 asm volatile("1:\n"
4943 "rex64 ; fxrstor (%0)\n"
4944 MMU_EXCEPTION(1b)
4945 : : "r" (addr));
4946 } else
4947 #endif
4948 {
4949 asm volatile("1:\n"
4950 "fxrstor (%0)\n"
4951 MMU_EXCEPTION(1b)
4952 : : "r" (addr));
4953 }
4954 break;
4955 case 5: /* lfence */
4956 case 6: /* mfence */
4957 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
4958 goto illegal_op;
4959 break;
4960 case 7: /* sfence / clflush */
4961 if ((modrm & 0xc7) == 0xc0) {
4962 /* sfence */
4963 if (!(s->cpuid_features & CPUID_SSE))
4964 goto illegal_op;
4965 } else {
4966 /* clflush */
4967 if (!(s->cpuid_features & CPUID_CLFLUSH))
4968 goto illegal_op;
4969 addr = get_modrm(s, modrm);
4970 }
4971 break;
4972 default:
4973 raise_exception(s, KQEMU_RET_SOFTMMU);
4974 }
4975 goto insn_next;
4976 LABEL(e3) /* jecxz */
4977 val = (int8_t)ldub_code(s);
4978 val2 = s->regs1.ecx;
4979 if (s->aflag == 0)
4980 val2 = (uint16_t)val2;
4981 #ifdef __x86_64__
4982 else if (s->aflag == 1)
4983 val2 = (uint32_t)val2;
4984 #endif
4985 if (val2 == 0)
4986 goto do_jmp;
4987 goto insn_next;
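/* all remaining opcodes fall through to unhandled_op and are passed
   back to QEMU through the soft MMU return code. */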
4989 LABEL(1ff)
4990 LABEL(1fe)
4991 LABEL(1fd)
4992 LABEL(1fc)
4993 LABEL(1fb)
4994 LABEL(1fa)
4995 LABEL(1f9)
4996 LABEL(1f8)
4997 LABEL(1f7)
4998 LABEL(1f6)
4999 LABEL(1f5)
5000 LABEL(1f4)
5001 LABEL(1f3)
5002 LABEL(1f2)
5003 LABEL(1f1)
5004 LABEL(1f0)
5005 LABEL(1ef)
5006 LABEL(1ee)
5007 LABEL(1ed)
5008 LABEL(1ec)
5009 LABEL(1eb)
5010 LABEL(1ea)
5011 LABEL(1e9)
5012 LABEL(1e8)
5013 LABEL(1e7)
5014 LABEL(1e6)
5015 LABEL(1e5)
5016 LABEL(1e4)
5017 LABEL(1e3)
5018 LABEL(1e2)
5019 LABEL(1e1)
5020 LABEL(1e0)
5021 LABEL(1df)
5022 LABEL(1de)
5023 LABEL(1dd)
5024 LABEL(1dc)
5025 LABEL(1db)
5026 LABEL(1da)
5027 LABEL(1d9)
5028 LABEL(1d8)
5029 LABEL(1d7)
5030 LABEL(1d6)
5031 LABEL(1d5)
5032 LABEL(1d4)
5033 LABEL(1d3)
5034 LABEL(1d2)
5035 LABEL(1d1)
5036 LABEL(1d0)
5037 LABEL(1cf)
5038 LABEL(1ce)
5039 LABEL(1cd)
5040 LABEL(1cc)
5041 LABEL(1cb)
5042 LABEL(1ca)
5043 LABEL(1c9)
5044 LABEL(1c8)
5045 LABEL(1c7)
5046 LABEL(1c6)
5047 LABEL(1c5)
5048 LABEL(1c4)
5049 LABEL(1c3)
5050 LABEL(1c2)
5051 LABEL(1b9)
5052 LABEL(1b8)
5053 LABEL(1b1)
5054 LABEL(1b0)
5055 LABEL(1aa)
5056 LABEL(1a7)
5057 LABEL(1a6)
5058 LABEL(1a2)
5059 LABEL(17f)
5060 LABEL(17e)
5061 LABEL(17d)
5062 LABEL(17c)
5063 LABEL(17b)
5064 LABEL(17a)
5065 LABEL(179)
5066 LABEL(178)
5067 LABEL(177)
5068 LABEL(176)
5069 LABEL(175)
5070 LABEL(174)
5071 LABEL(173)
5072 LABEL(172)
5073 LABEL(171)
5074 LABEL(170)
5075 LABEL(16f)
5076 LABEL(16e)
5077 LABEL(16d)
5078 LABEL(16c)
5079 LABEL(16b)
5080 LABEL(16a)
5081 LABEL(169)
5082 LABEL(168)
5083 LABEL(167)
5084 LABEL(166)
5085 LABEL(165)
5086 LABEL(164)
5087 LABEL(163)
5088 LABEL(162)
5089 LABEL(161)
5090 LABEL(160)
5091 LABEL(15f)
5092 LABEL(15e)
5093 LABEL(15d)
5094 LABEL(15c)
5095 LABEL(15b)
5096 LABEL(15a)
5097 LABEL(159)
5098 LABEL(158)
5099 LABEL(157)
5100 LABEL(156)
5101 LABEL(155)
5102 LABEL(154)
5103 LABEL(153)
5104 LABEL(152)
5105 LABEL(151)
5106 LABEL(150)
5107 LABEL(13f)
5108 LABEL(13e)
5109 LABEL(13d)
5110 LABEL(13c)
5111 LABEL(13b)
5112 LABEL(13a)
5113 LABEL(139)
5114 LABEL(138)
5115 LABEL(137)
5116 LABEL(136)
5117 LABEL(133)
5118 LABEL(12f)
5119 LABEL(12e)
5120 LABEL(12d)
5121 LABEL(12c)
5122 LABEL(12b)
5123 LABEL(12a)
5124 LABEL(129)
5125 LABEL(128)
5126 LABEL(127)
5127 LABEL(126)
5128 LABEL(125)
5129 LABEL(124)
5130 LABEL(117)
5131 LABEL(116)
5132 LABEL(115)
5133 LABEL(114)
5134 LABEL(113)
5135 LABEL(112)
5136 LABEL(111)
5137 LABEL(110)
5138 LABEL(10f)
5139 LABEL(10e)
5140 LABEL(10d)
5141 LABEL(10c)
5142 LABEL(10b)
5143 LABEL(10a)
5144 LABEL(104)
5145 LABEL(103)
5146 LABEL(102)
5147 LABEL(f1)
5148 LABEL(e2)
5149 LABEL(e1)
5150 LABEL(e0)
5151 LABEL(df)
5152 LABEL(de)
5153 LABEL(dd)
5154 LABEL(dc)
5155 LABEL(db)
5156 LABEL(da)
5157 LABEL(d9)
5158 LABEL(d8)
5159 LABEL(d7)
5160 LABEL(d6)
5161 LABEL(d5)
5162 LABEL(d4)
5163 LABEL(ce)
5164 LABEL(cc)
5165 LABEL(c8)
5166 LABEL(af)
5167 LABEL(ae)
5168 LABEL(ad)
5169 LABEL(ac)
5170 LABEL(ab)
5171 LABEL(aa)
5172 LABEL(a7)
5173 LABEL(a6)
5174 LABEL(9f)
5175 LABEL(9e)
5176 LABEL(9b)
5177 LABEL(62)
5178 LABEL(61)
5179 LABEL(60)
5180 LABEL(3f)
5181 LABEL(37)
5182 LABEL(2f)
5183 LABEL(27)
5184 goto unhandled_op;
5185 unhandled_op:
5186 illegal_op:
5187 raise_exception(s, KQEMU_RET_SOFTMMU);
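/* ljmp_op: reached after an instruction that changed CS:EIP. If
   interrupts are now enabled, stop interpreting and return to the
   caller (presumably so that pending interrupts can be delivered);
   otherwise continue at the new pc. */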
5188 ljmp_op:
5189 /* instruction modifying CS:EIP */
5190 if (get_eflags_if(s))
5191 goto the_end;
5192 pc = s->regs1.eip;
5193 UPDATE_CODE32();
5194 goto insn_next;
5195 the_end:
5196 pc = saved_pc;
5197 return 0;
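/* optional per-opcode profiling: instruction counts and total/min/max
   cycle statistics are accumulated in the tab_insn_* arrays. */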
5200 #ifdef PROFILE_INSN
5202 int n;
5203 n = getclock() - ti;
5204 s->tab_insn_count[opcode]++;
5205 s->tab_insn_cycles[opcode] += n;
5206 if (n < s->tab_insn_cycles_min[opcode])
5207 s->tab_insn_cycles_min[opcode] = n;
5208 if (n > s->tab_insn_cycles_max[opcode])
5209 s->tab_insn_cycles_max[opcode] = n;
5211 #endif