KVM: x86 emulator: mask group 8 instruction as BitOp
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
/* Misc flags */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
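
/*
 * The flags above share one 32-bit decode word: bit 0 selects byte
 * operands, bits 1-3 encode the destination type, bits 4-7 the source
 * type, and bits 29-31 the second source; the bits in between are
 * modifiers such as ModRM, BitOp, Lock and Priv.
 */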

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
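
/*
 * The X* macros expand to n copies of their argument and are used to
 * build opcode tables with runs of identical entries, e.g. X7(D(Lock))
 * below emits seven D(Lock) entries.
 */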

struct opcode {
	u32 flags;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
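/* Bit 1 of EFLAGS is architecturally reserved and always reads as one. */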

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */	\
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)			\
	/* _sav |= EFLAGS & _msk; */			\
	"pushf; "					\
	"pop  %"_tmp"; "				\
	"andl %"_msk",%"_LO32 _tmp"; "			\
	"orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
		case 1:							\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
			break;						\
		default:						\
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")

/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
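
/*
 * Both fetch macros rely on 'rc', 'ctxt' and 'ops' being in scope and on
 * a local 'done' label, which is why they are only used inside the decode
 * routines; a typical use is "c->modrm = insn_fetch(u8, 1, c->eip)".
 */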

static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}

static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
}

static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}

static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       int err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}

static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   ulong addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem = modrm_ea;
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
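
/*
 * For bt/bts/btr/btc with a register bit offset, the offset may address
 * a bit outside the decoded operand: the SDM defines the effective
 * address as the operand address plus the bit offset divided by the
 * operand width. fetch_bit_operand() applies that displacement, which is
 * why the group 8 instructions are decoded with the BitOp flag set.
 */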
static void fetch_bit_operand(struct decode_cache *c)
{
	long sv, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}
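
/*
 * Memory reads go through a per-instruction cache so that an instruction
 * restarted after an exit (e.g. for MMIO) replays the bytes it already
 * read instead of accessing memory twice.
 */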
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
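
/*
 * Writeback happens once, after the instruction body has executed. For a
 * lock-prefixed instruction the memory update is routed through
 * cmpxchg_emulated() so the read-modify-write appears atomic to other
 * vcpus.
 */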
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.addr.reg = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.addr.reg = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.addr.reg = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.addr.reg = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					c->dst.addr.mem,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					c->dst.addr.mem,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, c->dst.addr.mem, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
					   c->regs[VCPU_REGS_RSP]);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
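
/*
 * POPF may not change every flag: which bits of the popped value are
 * honoured depends on CPL and IOPL, and the remaining bits keep their
 * old values, as computed in change_mask below.
 */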
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}

static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
						   c->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;
	u32 err;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);

	c->src.val = c->eip;
	emulate_push(ctxt, ops);

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
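
/*
 * Group 9 is cmpxchg8b: compare EDX:EAX with the 64-bit destination;
 * on mismatch load EDX:EAX from it and clear ZF, otherwise store
 * ECX:EBX into it and set ZF.
 */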
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
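
/*
 * When CPL > IOPL (or in VM86 mode), I/O is still allowed if every port
 * in the range is clear in the TSS I/O permission bitmap, whose offset
 * is held at byte 102 of the TSS; hence the limit check against 103.
 */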
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * The SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * The SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
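
/*
 * A hardware task switch: validate the new TSS descriptor, save the
 * current CPU state into the old TSS, load the new state, and maintain
 * the busy bits and the NT flag/back link according to whether the
 * switch was caused by a jmp, call, iret or gate.
 */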
1998 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
1999 struct x86_emulate_ops *ops,
2000 u16 tss_selector, int reason,
2001 bool has_error_code, u32 error_code)
2003 struct desc_struct curr_tss_desc, next_tss_desc;
2004 int ret;
2005 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2006 ulong old_tss_base =
2007 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2008 u32 desc_limit;
2010 /* FIXME: old_tss_base == ~0 ? */
2012 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2013 if (ret != X86EMUL_CONTINUE)
2014 return ret;
2015 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2016 if (ret != X86EMUL_CONTINUE)
2017 return ret;
2019 /* FIXME: check that next_tss_desc is tss */
2021 if (reason != TASK_SWITCH_IRET) {
2022 if ((tss_selector & 3) > next_tss_desc.dpl ||
2023 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2024 emulate_gp(ctxt, 0);
2025 return X86EMUL_PROPAGATE_FAULT;
2029 desc_limit = desc_limit_scaled(&next_tss_desc);
2030 if (!next_tss_desc.p ||
2031 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2032 desc_limit < 0x2b)) {
2033 emulate_ts(ctxt, tss_selector & 0xfffc);
2034 return X86EMUL_PROPAGATE_FAULT;
2037 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2038 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2039 write_segment_descriptor(ctxt, ops, old_tss_sel,
2040 &curr_tss_desc);
2043 if (reason == TASK_SWITCH_IRET)
2044 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2046 /* set back link to prev task only if NT bit is set in eflags
2047 note that old_tss_sel is not used after this point */
2048 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2049 old_tss_sel = 0xffff;
2051 if (next_tss_desc.type & 8)
2052 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2053 old_tss_base, &next_tss_desc);
2054 else
2055 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2056 old_tss_base, &next_tss_desc);
2057 if (ret != X86EMUL_CONTINUE)
2058 return ret;
2060 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2061 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2063 if (reason != TASK_SWITCH_IRET) {
2064 next_tss_desc.type |= (1 << 1); /* set busy flag */
2065 write_segment_descriptor(ctxt, ops, tss_selector,
2066 &next_tss_desc);
2067 }
2069 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2070 ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2071 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2073 if (has_error_code) {
2074 struct decode_cache *c = &ctxt->decode;
2076 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2077 c->lock_prefix = 0;
2078 c->src.val = (unsigned long) error_code;
2079 emulate_push(ctxt, ops);
2080 }
2082 return ret;
2083 }
2085 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2086 u16 tss_selector, int reason,
2087 bool has_error_code, u32 error_code)
2088 {
2089 struct x86_emulate_ops *ops = ctxt->ops;
2090 struct decode_cache *c = &ctxt->decode;
2091 int rc;
2093 c->eip = ctxt->eip;
2094 c->dst.type = OP_NONE;
2096 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2097 has_error_code, error_code);
2099 if (rc == X86EMUL_CONTINUE) {
2100 rc = writeback(ctxt, ops);
2101 if (rc == X86EMUL_CONTINUE)
2102 ctxt->eip = c->eip;
2103 }
2105 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2106 }
2108 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2109 int reg, struct operand *op)
2110 {
2111 struct decode_cache *c = &ctxt->decode;
2112 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2114 register_address_increment(c, &c->regs[reg], df * op->bytes);
2115 op->addr.mem = register_address(c, base, c->regs[reg]);
2116 }
2118 static int em_push(struct x86_emulate_ctxt *ctxt)
2119 {
2120 emulate_push(ctxt, ctxt->ops);
2121 return X86EMUL_CONTINUE;
2122 }
2124 #define D(_y) { .flags = (_y) }
2125 #define N D(0)
2126 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2127 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2128 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
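/*
 * Table shorthand: D() declares an entry by decode flags alone, N is
 * an entry that cannot be emulated (flags 0), G()/GD() defer to a
 * group table indexed by the ModRM reg field, and I() attaches an
 * execution callback.  E.g. 0x89 (mov r/m,r) below is simply
 * D(DstMem | SrcReg | ModRM | Mov).
 */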
2130 static struct opcode group1[] = {
2131 X7(D(Lock)), N
2132 };
2134 static struct opcode group1A[] = {
2135 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2136 };
2138 static struct opcode group3[] = {
2139 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2140 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2141 X4(D(Undefined)),
2142 };
2144 static struct opcode group4[] = {
2145 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2146 N, N, N, N, N, N,
2147 };
2149 static struct opcode group5[] = {
2150 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2151 D(SrcMem | ModRM | Stack), N,
2152 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2153 D(SrcMem | ModRM | Stack), N,
2154 };
2156 static struct group_dual group7 = { {
2157 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2158 D(SrcNone | ModRM | DstMem | Mov), N,
2159 D(SrcMem16 | ModRM | Mov | Priv),
2160 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2161 }, {
2162 D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
2163 D(SrcNone | ModRM | DstMem | Mov), N,
2164 D(SrcMem16 | ModRM | Mov | Priv), N,
2165 } };
2167 static struct opcode group8[] = {
2168 N, N, N, N,
2169 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2170 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2171 };
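/*
 * Group 8 (0x0f 0xba): bt/bts/btr/btc with an immediate bit offset.
 * Encodings /0-/3 are undefined; only the writing forms take Lock.
 * The group is entered with BitOp already set (see the 0xba entry in
 * twobyte_table) so a memory destination is decoded as a bit string.
 */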
2173 static struct group_dual group9 = { {
2174 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2175 }, {
2176 N, N, N, N, N, N, N, N,
2177 } };
2179 static struct opcode opcode_table[256] = {
2180 /* 0x00 - 0x07 */
2181 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2182 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2183 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2184 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2185 /* 0x08 - 0x0F */
2186 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2187 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2188 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2189 D(ImplicitOps | Stack | No64), N,
2190 /* 0x10 - 0x17 */
2191 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2192 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2193 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2194 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2195 /* 0x18 - 0x1F */
2196 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2197 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2198 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2199 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2200 /* 0x20 - 0x27 */
2201 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2202 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2203 D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
2204 /* 0x28 - 0x2F */
2205 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2206 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2207 D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
2208 /* 0x30 - 0x37 */
2209 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2210 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2211 D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
2212 /* 0x38 - 0x3F */
2213 D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
2214 D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2215 D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
2216 N, N,
2217 /* 0x40 - 0x4F */
2218 X16(D(DstReg)),
2219 /* 0x50 - 0x57 */
2220 X8(I(SrcReg | Stack, em_push)),
2221 /* 0x58 - 0x5F */
2222 X8(D(DstReg | Stack)),
2223 /* 0x60 - 0x67 */
2224 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2225 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
2226 N, N, N, N,
2227 /* 0x68 - 0x6F */
2228 I(SrcImm | Mov | Stack, em_push), N,
2229 I(SrcImmByte | Mov | Stack, em_push), N,
2230 D(DstDI | ByteOp | Mov | String), D(DstDI | Mov | String), /* insb, insw/insd */
2231 D(SrcSI | ByteOp | ImplicitOps | String), D(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2232 /* 0x70 - 0x7F */
2233 X16(D(SrcImmByte)),
2234 /* 0x80 - 0x87 */
2235 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2236 G(DstMem | SrcImm | ModRM | Group, group1),
2237 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2238 G(DstMem | SrcImmByte | ModRM | Group, group1),
2239 D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
2240 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2241 /* 0x88 - 0x8F */
2242 D(ByteOp | DstMem | SrcReg | ModRM | Mov), D(DstMem | SrcReg | ModRM | Mov),
2243 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem | ModRM | Mov),
2244 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2245 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2246 /* 0x90 - 0x97 */
2247 X8(D(SrcAcc | DstReg)),
2248 /* 0x98 - 0x9F */
2249 N, N, D(SrcImmFAddr | No64), N,
2250 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2251 /* 0xA0 - 0xA7 */
2252 D(ByteOp | DstAcc | SrcMem | Mov | MemAbs), D(DstAcc | SrcMem | Mov | MemAbs),
2253 D(ByteOp | DstMem | SrcAcc | Mov | MemAbs), D(DstMem | SrcAcc | Mov | MemAbs),
2254 D(ByteOp | SrcSI | DstDI | Mov | String), D(SrcSI | DstDI | Mov | String),
2255 D(ByteOp | SrcSI | DstDI | String), D(SrcSI | DstDI | String),
2256 /* 0xA8 - 0xAF */
2257 D(DstAcc | SrcImmByte | ByteOp), D(DstAcc | SrcImm),
2258 D(ByteOp | SrcAcc | DstDI | Mov | String), D(SrcAcc | DstDI | Mov | String),
2259 D(ByteOp | SrcSI | DstAcc | Mov | String), D(SrcSI | DstAcc | Mov | String),
2260 D(ByteOp | DstDI | String), D(DstDI | String),
2261 /* 0xB0 - 0xB7 */
2262 X8(D(ByteOp | DstReg | SrcImm | Mov)),
2263 /* 0xB8 - 0xBF */
2264 X8(D(DstReg | SrcImm | Mov)),
2265 /* 0xC0 - 0xC7 */
2266 D(ByteOp | DstMem | SrcImm | ModRM), D(DstMem | SrcImmByte | ModRM),
2267 N, D(ImplicitOps | Stack), N, N,
2268 D(ByteOp | DstMem | SrcImm | ModRM | Mov), D(DstMem | SrcImm | ModRM | Mov),
2269 /* 0xC8 - 0xCF */
2270 N, N, N, D(ImplicitOps | Stack),
2271 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2272 /* 0xD0 - 0xD7 */
2273 D(ByteOp | DstMem | SrcOne | ModRM), D(DstMem | SrcOne | ModRM),
2274 D(ByteOp | DstMem | SrcImplicit | ModRM), D(DstMem | SrcImplicit | ModRM),
2275 N, N, N, N,
2276 /* 0xD8 - 0xDF */
2277 N, N, N, N, N, N, N, N,
2278 /* 0xE0 - 0xE7 */
2279 N, N, N, N,
2280 D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
2281 D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
2282 /* 0xE8 - 0xEF */
2283 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2284 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2285 D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
2286 D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
2287 /* 0xF0 - 0xF7 */
2288 N, N, N, N,
2289 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2290 /* 0xF8 - 0xFF */
2291 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2292 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2293 };
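/*
 * Two-byte (0x0f-prefixed) opcode map.  Note that the bit-test family
 * at 0xa3/0xab/0xb3/0xbb and group 8 at 0xba all carry BitOp, letting
 * the decoder rebase a memory destination by the bit offset.
 */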
2295 static struct opcode twobyte_table[256] = {
2296 /* 0x00 - 0x0F */
2297 N, GD(0, &group7), N, N,
2298 N, D(ImplicitOps), D(ImplicitOps | Priv), N,
2299 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2300 N, D(ImplicitOps | ModRM), N, N,
2301 /* 0x10 - 0x1F */
2302 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2303 /* 0x20 - 0x2F */
2304 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2305 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2306 N, N, N, N,
2307 N, N, N, N, N, N, N, N,
2308 /* 0x30 - 0x3F */
2309 D(ImplicitOps | Priv), N, D(ImplicitOps | Priv), N,
2310 D(ImplicitOps), D(ImplicitOps | Priv), N, N,
2311 N, N, N, N, N, N, N, N,
2312 /* 0x40 - 0x4F */
2313 X16(D(DstReg | SrcMem | ModRM | Mov)),
2314 /* 0x50 - 0x5F */
2315 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2316 /* 0x60 - 0x6F */
2317 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2318 /* 0x70 - 0x7F */
2319 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2320 /* 0x80 - 0x8F */
2321 X16(D(SrcImm)),
2322 /* 0x90 - 0x9F */
2323 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2324 /* 0xA0 - 0xA7 */
2325 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2326 N, D(DstMem | SrcReg | ModRM | BitOp),
2327 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2328 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2329 /* 0xA8 - 0xAF */
2330 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2331 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2332 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2333 D(DstMem | SrcReg | Src2CL | ModRM),
2334 D(ModRM), N,
2335 /* 0xB0 - 0xB7 */
2336 D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
2337 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2338 N, N, D(ByteOp | DstReg | SrcMem | ModRM | Mov),
2339 D(DstReg | SrcMem16 | ModRM | Mov),
2340 /* 0xB8 - 0xBF */
2341 N, N,
2342 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2343 N, N, D(ByteOp | DstReg | SrcMem | ModRM | Mov),
2344 D(DstReg | SrcMem16 | ModRM | Mov),
2345 /* 0xC0 - 0xCF */
2346 N, N, N, D(DstMem | SrcReg | ModRM | Mov),
2347 N, N, N, GD(0, &group9),
2348 N, N, N, N, N, N, N, N,
2349 /* 0xD0 - 0xDF */
2350 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2351 /* 0xE0 - 0xEF */
2352 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2353 /* 0xF0 - 0xFF */
2354 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2355 };
2357 #undef D
2358 #undef N
2359 #undef G
2360 #undef GD
2361 #undef I
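/*
 * Decode pass: consume legacy/REX prefixes and opcode bytes, resolve
 * group entries via the ModRM reg field, then materialize the source,
 * second-source and destination operands described by the flags.
 * Returns 0 on success, -1 if the instruction cannot be handled.
 */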
2363 int
2364 x86_decode_insn(struct x86_emulate_ctxt *ctxt)
2365 {
2366 struct x86_emulate_ops *ops = ctxt->ops;
2367 struct decode_cache *c = &ctxt->decode;
2368 int rc = X86EMUL_CONTINUE;
2369 int mode = ctxt->mode;
2370 int def_op_bytes, def_ad_bytes, dual, goffset;
2371 struct opcode opcode, *g_mod012, *g_mod3;
2372 struct operand memop = { .type = OP_NONE };
2374 /* we cannot decode insn before we complete previous rep insn */
2375 WARN_ON(ctxt->restart);
2377 c->eip = ctxt->eip;
2378 c->fetch.start = c->fetch.end = c->eip;
2379 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2381 switch (mode) {
2382 case X86EMUL_MODE_REAL:
2383 case X86EMUL_MODE_VM86:
2384 case X86EMUL_MODE_PROT16:
2385 def_op_bytes = def_ad_bytes = 2;
2386 break;
2387 case X86EMUL_MODE_PROT32:
2388 def_op_bytes = def_ad_bytes = 4;
2389 break;
2390 #ifdef CONFIG_X86_64
2391 case X86EMUL_MODE_PROT64:
2392 def_op_bytes = 4;
2393 def_ad_bytes = 8;
2394 break;
2395 #endif
2396 default:
2397 return -1;
2398 }
2400 c->op_bytes = def_op_bytes;
2401 c->ad_bytes = def_ad_bytes;
2403 /* Legacy prefixes. */
2404 for (;;) {
2405 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2406 case 0x66: /* operand-size override */
2407 /* switch between 2/4 bytes */
2408 c->op_bytes = def_op_bytes ^ 6;
2409 break;
2410 case 0x67: /* address-size override */
2411 if (mode == X86EMUL_MODE_PROT64)
2412 /* switch between 4/8 bytes */
2413 c->ad_bytes = def_ad_bytes ^ 12;
2414 else
2415 /* switch between 2/4 bytes */
2416 c->ad_bytes = def_ad_bytes ^ 6;
2417 break;
2418 case 0x26: /* ES override */
2419 case 0x2e: /* CS override */
2420 case 0x36: /* SS override */
2421 case 0x3e: /* DS override */
2422 set_seg_override(c, (c->b >> 3) & 3);
2423 break;
2424 case 0x64: /* FS override */
2425 case 0x65: /* GS override */
2426 set_seg_override(c, c->b & 7);
2427 break;
2428 case 0x40 ... 0x4f: /* REX */
2429 if (mode != X86EMUL_MODE_PROT64)
2430 goto done_prefixes;
2431 c->rex_prefix = c->b;
2432 continue;
2433 case 0xf0: /* LOCK */
2434 c->lock_prefix = 1;
2435 break;
2436 case 0xf2: /* REPNE/REPNZ */
2437 c->rep_prefix = REPNE_PREFIX;
2438 break;
2439 case 0xf3: /* REP/REPE/REPZ */
2440 c->rep_prefix = REPE_PREFIX;
2441 break;
2442 default:
2443 goto done_prefixes;
2444 }
2446 /* Any legacy prefix after a REX prefix nullifies its effect. */
2448 c->rex_prefix = 0;
2449 }
2451 done_prefixes:
2453 /* REX prefix. */
2454 if (c->rex_prefix & 8)
2455 c->op_bytes = 8; /* REX.W */
2457 /* Opcode byte(s). */
2458 opcode = opcode_table[c->b];
2459 /* Two-byte opcode? */
2460 if (c->b == 0x0f) {
2461 c->twobyte = 1;
2462 c->b = insn_fetch(u8, 1, c->eip);
2463 opcode = twobyte_table[c->b];
2464 }
2465 c->d = opcode.flags;
2467 if (c->d & Group) {
2468 dual = c->d & GroupDual;
2469 c->modrm = insn_fetch(u8, 1, c->eip);
2470 --c->eip;
2472 if (c->d & GroupDual) {
2473 g_mod012 = opcode.u.gdual->mod012;
2474 g_mod3 = opcode.u.gdual->mod3;
2475 } else
2476 g_mod012 = g_mod3 = opcode.u.group;
2478 c->d &= ~(Group | GroupDual);
2480 goffset = (c->modrm >> 3) & 7;
2482 if ((c->modrm >> 6) == 3)
2483 opcode = g_mod3[goffset];
2484 else
2485 opcode = g_mod012[goffset];
2486 c->d |= opcode.flags;
2487 }
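/*
 * e.g. 0x0f 0xba with ModRM reg field 5 selects group8[5], the
 * immediate form of bts; its flags are OR'd on top of the table's.
 */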
2489 c->execute = opcode.u.execute;
2491 /* Unrecognised? */
2492 if (c->d == 0 || (c->d & Undefined)) {
2493 DPRINTF("Cannot emulate %02x\n", c->b);
2494 return -1;
2495 }
2497 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2498 c->op_bytes = 8;
2500 if (c->d & Op3264) {
2501 if (mode == X86EMUL_MODE_PROT64)
2502 c->op_bytes = 8;
2503 else
2504 c->op_bytes = 4;
2505 }
2507 /* ModRM and SIB bytes. */
2508 if (c->d & ModRM) {
2509 rc = decode_modrm(ctxt, ops, &memop);
2510 if (!c->has_seg_override)
2511 set_seg_override(c, c->modrm_seg);
2512 } else if (c->d & MemAbs)
2513 rc = decode_abs(ctxt, ops, &memop);
2514 if (rc != X86EMUL_CONTINUE)
2515 goto done;
2517 if (!c->has_seg_override)
2518 set_seg_override(c, VCPU_SREG_DS);
2520 if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
2521 memop.addr.mem += seg_override_base(ctxt, ops, c);
2523 if (memop.type == OP_MEM && c->ad_bytes != 8)
2524 memop.addr.mem = (u32)memop.addr.mem;
2526 if (memop.type == OP_MEM && c->rip_relative)
2527 memop.addr.mem += c->eip;
2529 /*
2530 * Decode and fetch the source operand: register, memory
2531 * or immediate.
2532 */
2533 switch (c->d & SrcMask) {
2534 case SrcNone:
2535 break;
2536 case SrcReg:
2537 decode_register_operand(&c->src, c, 0);
2538 break;
2539 case SrcMem16:
2540 memop.bytes = 2;
2541 goto srcmem_common;
2542 case SrcMem32:
2543 memop.bytes = 4;
2544 goto srcmem_common;
2545 case SrcMem:
2546 memop.bytes = (c->d & ByteOp) ? 1 :
2547 c->op_bytes;
2548 srcmem_common:
2549 c->src = memop;
2550 break;
2551 case SrcImm:
2552 case SrcImmU:
2553 c->src.type = OP_IMM;
2554 c->src.addr.mem = c->eip;
2555 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2556 if (c->src.bytes == 8)
2557 c->src.bytes = 4;
2558 /* NB. Immediates are sign-extended as necessary. */
2559 switch (c->src.bytes) {
2560 case 1:
2561 c->src.val = insn_fetch(s8, 1, c->eip);
2562 break;
2563 case 2:
2564 c->src.val = insn_fetch(s16, 2, c->eip);
2565 break;
2566 case 4:
2567 c->src.val = insn_fetch(s32, 4, c->eip);
2568 break;
2569 }
2570 if ((c->d & SrcMask) == SrcImmU) {
2571 switch (c->src.bytes) {
2572 case 1:
2573 c->src.val &= 0xff;
2574 break;
2575 case 2:
2576 c->src.val &= 0xffff;
2577 break;
2578 case 4:
2579 c->src.val &= 0xffffffff;
2580 break;
2581 }
2582 }
2583 break;
2584 case SrcImmByte:
2585 case SrcImmUByte:
2586 c->src.type = OP_IMM;
2587 c->src.addr.mem = c->eip;
2588 c->src.bytes = 1;
2589 if ((c->d & SrcMask) == SrcImmByte)
2590 c->src.val = insn_fetch(s8, 1, c->eip);
2591 else
2592 c->src.val = insn_fetch(u8, 1, c->eip);
2593 break;
2594 case SrcAcc:
2595 c->src.type = OP_REG;
2596 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2597 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2598 fetch_register_operand(&c->src);
2599 break;
2600 case SrcOne:
2601 c->src.bytes = 1;
2602 c->src.val = 1;
2603 break;
2604 case SrcSI:
2605 c->src.type = OP_MEM;
2606 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2607 c->src.addr.mem =
2608 register_address(c, seg_override_base(ctxt, ops, c),
2609 c->regs[VCPU_REGS_RSI]);
2610 c->src.val = 0;
2611 break;
2612 case SrcImmFAddr:
2613 c->src.type = OP_IMM;
2614 c->src.addr.mem = c->eip;
2615 c->src.bytes = c->op_bytes + 2;
2616 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2617 break;
2618 case SrcMemFAddr:
2619 memop.bytes = c->op_bytes + 2;
2620 goto srcmem_common;
2621 break;
2622 }
2624 /*
2625 * Decode and fetch the second source operand: register, memory
2626 * or immediate.
2627 */
2628 switch (c->d & Src2Mask) {
2629 case Src2None:
2630 break;
2631 case Src2CL:
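/* the shift count for shld/shrd is taken from CL, the low byte of RCX */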
2632 c->src2.bytes = 1;
2633 c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff;
2634 break;
2635 case Src2ImmByte:
2636 c->src2.type = OP_IMM;
2637 c->src2.addr.mem = c->eip;
2638 c->src2.bytes = 1;
2639 c->src2.val = insn_fetch(u8, 1, c->eip);
2640 break;
2641 case Src2One:
2642 c->src2.bytes = 1;
2643 c->src2.val = 1;
2644 break;
2645 }
2647 /* Decode and fetch the destination operand: register or memory. */
2648 switch (c->d & DstMask) {
2649 case DstReg:
2650 decode_register_operand(&c->dst, c,
2651 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
2652 break;
2653 case DstMem:
2654 case DstMem64:
2655 c->dst = memop;
2656 if ((c->d & DstMask) == DstMem64)
2657 c->dst.bytes = 8;
2658 else
2659 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
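/*
 * For BitOp destinations (the bt family), fetch_bit_operand() is
 * expected to displace the memory address by the bit offset found
 * in the source operand before the value is read.
 */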
2660 if (c->d & BitOp)
2661 fetch_bit_operand(c);
2662 c->dst.orig_val = c->dst.val;
2663 break;
2664 case DstAcc:
2665 c->dst.type = OP_REG;
2666 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2667 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
2668 fetch_register_operand(&c->dst);
2669 c->dst.orig_val = c->dst.val;
2670 break;
2671 case DstDI:
2672 c->dst.type = OP_MEM;
2673 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2674 c->dst.addr.mem =
2675 register_address(c, es_base(ctxt, ops),
2676 c->regs[VCPU_REGS_RDI]);
2677 c->dst.val = 0;
2678 break;
2679 case ImplicitOps:
2680 /* Special instructions do their own operand decoding. */
2681 default:
2682 c->dst.type = OP_NONE; /* Disable writeback. */
2683 return 0;
2684 }
2686 done:
2687 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2688 }
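/*
 * Execute pass: validate prefixes and privilege, handle rep-string
 * termination, read any memory operands, then dispatch through the
 * opcode's execute callback or the switch below, and write back.
 */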
2690 int
2691 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
2692 {
2693 struct x86_emulate_ops *ops = ctxt->ops;
2694 u64 msr_data;
2695 struct decode_cache *c = &ctxt->decode;
2696 int rc = X86EMUL_CONTINUE;
2697 int saved_dst_type = c->dst.type;
2698 int irq; /* Used for int 3, int, and into */
2700 ctxt->decode.mem_read.pos = 0;
2702 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
2703 emulate_ud(ctxt);
2704 goto done;
2705 }
2707 /* LOCK prefix is allowed only with some instructions */
2708 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
2709 emulate_ud(ctxt);
2710 goto done;
2711 }
2713 /* Privileged instruction can be executed only in CPL=0 */
2714 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
2715 emulate_gp(ctxt, 0);
2716 goto done;
2717 }
2719 if (c->rep_prefix && (c->d & String)) {
2720 ctxt->restart = true;
2721 /* All REP prefixes have the same first termination condition */
2722 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
2723 string_done:
2724 ctxt->restart = false;
2725 ctxt->eip = c->eip;
2726 goto done;
2727 }
2728 /* The second termination condition only applies for REPE
2729 * and REPNE. Test if the repeat string operation prefix is
2730 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
2731 * corresponding termination condition according to:
2732 * - if REPE/REPZ and ZF = 0 then done
2733 * - if REPNE/REPNZ and ZF = 1 then done
2734 */
2735 if ((c->b == 0xa6) || (c->b == 0xa7) ||
2736 (c->b == 0xae) || (c->b == 0xaf)) {
2737 if ((c->rep_prefix == REPE_PREFIX) &&
2738 ((ctxt->eflags & EFLG_ZF) == 0))
2739 goto string_done;
2740 if ((c->rep_prefix == REPNE_PREFIX) &&
2741 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
2742 goto string_done;
2743 }
2744 c->eip = ctxt->eip;
2745 }
2747 if (c->src.type == OP_MEM) {
2748 if (c->d & NoAccess)
2749 goto no_fetch;
2750 rc = read_emulated(ctxt, ops, c->src.addr.mem,
2751 c->src.valptr, c->src.bytes);
2752 if (rc != X86EMUL_CONTINUE)
2753 goto done;
2754 c->src.orig_val64 = c->src.val64;
2755 no_fetch:
2756 ;
2757 }
2759 if (c->src2.type == OP_MEM) {
2760 rc = read_emulated(ctxt, ops, c->src2.addr.mem,
2761 &c->src2.val, c->src2.bytes);
2762 if (rc != X86EMUL_CONTINUE)
2763 goto done;
2764 }
2766 if ((c->d & DstMask) == ImplicitOps)
2767 goto special_insn;
2770 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
2771 /* optimisation - avoid slow emulated read if Mov */
2772 rc = read_emulated(ctxt, ops, c->dst.addr.mem,
2773 &c->dst.val, c->dst.bytes);
2774 if (rc != X86EMUL_CONTINUE)
2775 goto done;
2776 }
2777 c->dst.orig_val = c->dst.val;
2779 special_insn:
2781 if (c->execute) {
2782 rc = c->execute(ctxt);
2783 if (rc != X86EMUL_CONTINUE)
2784 goto done;
2785 goto writeback;
2786 }
2788 if (c->twobyte)
2789 goto twobyte_insn;
2791 switch (c->b) {
2792 case 0x00 ... 0x05:
2793 add: /* add */
2794 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
2795 break;
2796 case 0x06: /* push es */
2797 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
2798 break;
2799 case 0x07: /* pop es */
2800 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
2801 if (rc != X86EMUL_CONTINUE)
2802 goto done;
2803 break;
2804 case 0x08 ... 0x0d:
2805 or: /* or */
2806 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2807 break;
2808 case 0x0e: /* push cs */
2809 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
2810 break;
2811 case 0x10 ... 0x15:
2812 adc: /* adc */
2813 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
2814 break;
2815 case 0x16: /* push ss */
2816 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
2817 break;
2818 case 0x17: /* pop ss */
2819 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
2820 if (rc != X86EMUL_CONTINUE)
2821 goto done;
2822 break;
2823 case 0x18 ... 0x1d:
2824 sbb: /* sbb */
2825 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
2826 break;
2827 case 0x1e: /* push ds */
2828 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
2829 break;
2830 case 0x1f: /* pop ds */
2831 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
2832 if (rc != X86EMUL_CONTINUE)
2833 goto done;
2834 break;
2835 case 0x20 ... 0x25:
2836 and: /* and */
2837 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
2838 break;
2839 case 0x28 ... 0x2d:
2840 sub: /* sub */
2841 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
2842 break;
2843 case 0x30 ... 0x35:
2844 xor: /* xor */
2845 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
2846 break;
2847 case 0x38 ... 0x3d:
2848 cmp: /* cmp */
2849 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2850 break;
2851 case 0x40 ... 0x47: /* inc r16/r32 */
2852 emulate_1op("inc", c->dst, ctxt->eflags);
2853 break;
2854 case 0x48 ... 0x4f: /* dec r16/r32 */
2855 emulate_1op("dec", c->dst, ctxt->eflags);
2856 break;
2857 case 0x58 ... 0x5f: /* pop reg */
2858 pop_instruction:
2859 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
2860 if (rc != X86EMUL_CONTINUE)
2861 goto done;
2862 break;
2863 case 0x60: /* pusha */
2864 rc = emulate_pusha(ctxt, ops);
2865 if (rc != X86EMUL_CONTINUE)
2866 goto done;
2867 break;
2868 case 0x61: /* popa */
2869 rc = emulate_popa(ctxt, ops);
2870 if (rc != X86EMUL_CONTINUE)
2871 goto done;
2872 break;
2873 case 0x63: /* movsxd */
2874 if (ctxt->mode != X86EMUL_MODE_PROT64)
2875 goto cannot_emulate;
2876 c->dst.val = (s32) c->src.val;
2877 break;
2878 case 0x6c: /* insb */
2879 case 0x6d: /* insw/insd */
2880 c->dst.bytes = min(c->dst.bytes, 4u);
2881 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2882 c->dst.bytes)) {
2883 emulate_gp(ctxt, 0);
2884 goto done;
2886 if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
2887 c->regs[VCPU_REGS_RDX], &c->dst.val))
2888 goto done; /* IO is needed, skip writeback */
2889 break;
2890 case 0x6e: /* outsb */
2891 case 0x6f: /* outsw/outsd */
2892 c->src.bytes = min(c->src.bytes, 4u);
2893 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2894 c->src.bytes)) {
2895 emulate_gp(ctxt, 0);
2896 goto done;
2898 ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
2899 &c->src.val, 1, ctxt->vcpu);
2901 c->dst.type = OP_NONE; /* nothing to writeback */
2902 break;
2903 case 0x70 ... 0x7f: /* jcc (short) */
2904 if (test_cc(c->b, ctxt->eflags))
2905 jmp_rel(c, c->src.val);
2906 break;
2907 case 0x80 ... 0x83: /* Grp1 */
2908 switch (c->modrm_reg) {
2909 case 0:
2910 goto add;
2911 case 1:
2912 goto or;
2913 case 2:
2914 goto adc;
2915 case 3:
2916 goto sbb;
2917 case 4:
2918 goto and;
2919 case 5:
2920 goto sub;
2921 case 6:
2922 goto xor;
2923 case 7:
2924 goto cmp;
2925 }
2926 break;
2927 case 0x84 ... 0x85:
2928 test:
2929 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
2930 break;
2931 case 0x86 ... 0x87: /* xchg */
2932 xchg:
2933 /* Write back the register source. */
2934 switch (c->dst.bytes) {
2935 case 1:
2936 *(u8 *) c->src.addr.reg = (u8) c->dst.val;
2937 break;
2938 case 2:
2939 *(u16 *) c->src.addr.reg = (u16) c->dst.val;
2940 break;
2941 case 4:
2942 *c->src.addr.reg = (u32) c->dst.val;
2943 break; /* 64b reg: zero-extend */
2944 case 8:
2945 *c->src.addr.reg = c->dst.val;
2946 break;
2947 }
2948 /*
2949 * Write back the memory destination with implicit LOCK
2950 * prefix.
2951 */
2952 c->dst.val = c->src.val;
2953 c->lock_prefix = 1;
2954 break;
2955 case 0x88 ... 0x8b: /* mov */
2956 goto mov;
2957 case 0x8c: /* mov r/m, sreg */
2958 if (c->modrm_reg > VCPU_SREG_GS) {
2959 emulate_ud(ctxt);
2960 goto done;
2961 }
2962 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
2963 break;
2964 case 0x8d: /* lea r16/r32, m */
2965 c->dst.val = c->src.addr.mem;
2966 break;
2967 case 0x8e: { /* mov seg, r/m16 */
2968 uint16_t sel;
2970 sel = c->src.val;
2972 if (c->modrm_reg == VCPU_SREG_CS ||
2973 c->modrm_reg > VCPU_SREG_GS) {
2974 emulate_ud(ctxt);
2975 goto done;
2976 }
2978 if (c->modrm_reg == VCPU_SREG_SS)
2979 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2981 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
2983 c->dst.type = OP_NONE; /* Disable writeback. */
2984 break;
2985 }
2986 case 0x8f: /* pop (sole member of Grp1a) */
2987 rc = emulate_grp1a(ctxt, ops);
2988 if (rc != X86EMUL_CONTINUE)
2989 goto done;
2990 break;
2991 case 0x90 ... 0x97: /* nop / xchg reg, rax */
2992 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
2993 break;
2994 goto xchg;
2995 case 0x9c: /* pushf */
2996 c->src.val = (unsigned long) ctxt->eflags;
2997 emulate_push(ctxt, ops);
2998 break;
2999 case 0x9d: /* popf */
3000 c->dst.type = OP_REG;
3001 c->dst.addr.reg = &ctxt->eflags;
3002 c->dst.bytes = c->op_bytes;
3003 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3004 if (rc != X86EMUL_CONTINUE)
3005 goto done;
3006 break;
3007 case 0xa0 ... 0xa3: /* mov */
3008 case 0xa4 ... 0xa5: /* movs */
3009 goto mov;
3010 case 0xa6 ... 0xa7: /* cmps */
3011 c->dst.type = OP_NONE; /* Disable writeback. */
3012 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
3013 goto cmp;
3014 case 0xa8 ... 0xa9: /* test ax, imm */
3015 goto test;
3016 case 0xaa ... 0xab: /* stos */
3017 case 0xac ... 0xad: /* lods */
3018 goto mov;
3019 case 0xae ... 0xaf: /* scas */
3020 DPRINTF("Urk! I don't handle SCAS.\n");
3021 goto cannot_emulate;
3022 case 0xb0 ... 0xbf: /* mov r, imm */
3023 goto mov;
3024 case 0xc0 ... 0xc1:
3025 emulate_grp2(ctxt);
3026 break;
3027 case 0xc3: /* ret */
3028 c->dst.type = OP_REG;
3029 c->dst.addr.reg = &c->eip;
3030 c->dst.bytes = c->op_bytes;
3031 goto pop_instruction;
3032 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
3033 mov:
3034 c->dst.val = c->src.val;
3035 break;
3036 case 0xcb: /* ret far */
3037 rc = emulate_ret_far(ctxt, ops);
3038 if (rc != X86EMUL_CONTINUE)
3039 goto done;
3040 break;
3041 case 0xcc: /* int3 */
3042 irq = 3;
3043 goto do_interrupt;
3044 case 0xcd: /* int n */
3045 irq = c->src.val;
3046 do_interrupt:
3047 rc = emulate_int(ctxt, ops, irq);
3048 if (rc != X86EMUL_CONTINUE)
3049 goto done;
3050 break;
3051 case 0xce: /* into */
3052 if (ctxt->eflags & EFLG_OF) {
3053 irq = 4;
3054 goto do_interrupt;
3055 }
3056 break;
3057 case 0xcf: /* iret */
3058 rc = emulate_iret(ctxt, ops);
3060 if (rc != X86EMUL_CONTINUE)
3061 goto done;
3062 break;
3063 case 0xd0 ... 0xd1: /* Grp2 */
3064 emulate_grp2(ctxt);
3065 break;
3066 case 0xd2 ... 0xd3: /* Grp2 */
3067 c->src.val = c->regs[VCPU_REGS_RCX];
3068 emulate_grp2(ctxt);
3069 break;
3070 case 0xe4: /* inb */
3071 case 0xe5: /* in */
3072 goto do_io_in;
3073 case 0xe6: /* outb */
3074 case 0xe7: /* out */
3075 goto do_io_out;
3076 case 0xe8: /* call (near) */ {
3077 long int rel = c->src.val;
3078 c->src.val = (unsigned long) c->eip;
3079 jmp_rel(c, rel);
3080 emulate_push(ctxt, ops);
3081 break;
3082 }
3083 case 0xe9: /* jmp rel */
3084 goto jmp;
3085 case 0xea: { /* jmp far */
3086 unsigned short sel;
3087 jump_far:
3088 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3090 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3091 goto done;
3093 c->eip = 0;
3094 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3095 break;
3096 }
3097 case 0xeb:
3098 jmp: /* jmp rel short */
3099 jmp_rel(c, c->src.val);
3100 c->dst.type = OP_NONE; /* Disable writeback. */
3101 break;
3102 case 0xec: /* in al,dx */
3103 case 0xed: /* in (e/r)ax,dx */
3104 c->src.val = c->regs[VCPU_REGS_RDX];
3105 do_io_in:
3106 c->dst.bytes = min(c->dst.bytes, 4u);
3107 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3108 emulate_gp(ctxt, 0);
3109 goto done;
3110 }
3111 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3112 &c->dst.val))
3113 goto done; /* IO is needed */
3114 break;
3115 case 0xee: /* out dx,al */
3116 case 0xef: /* out dx,(e/r)ax */
3117 c->src.val = c->regs[VCPU_REGS_RDX];
3118 do_io_out:
3119 c->dst.bytes = min(c->dst.bytes, 4u);
3120 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3121 emulate_gp(ctxt, 0);
3122 goto done;
3123 }
3124 ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
3125 ctxt->vcpu);
3126 c->dst.type = OP_NONE; /* Disable writeback. */
3127 break;
3128 case 0xf4: /* hlt */
3129 ctxt->vcpu->arch.halt_request = 1;
3130 break;
3131 case 0xf5: /* cmc */
3132 /* complement carry flag from eflags reg */
3133 ctxt->eflags ^= EFLG_CF;
3134 break;
3135 case 0xf6 ... 0xf7: /* Grp3 */
3136 if (!emulate_grp3(ctxt, ops))
3137 goto cannot_emulate;
3138 break;
3139 case 0xf8: /* clc */
3140 ctxt->eflags &= ~EFLG_CF;
3141 break;
3142 case 0xf9: /* stc */
3143 ctxt->eflags |= EFLG_CF;
3144 break;
3145 case 0xfa: /* cli */
3146 if (emulator_bad_iopl(ctxt, ops)) {
3147 emulate_gp(ctxt, 0);
3148 goto done;
3149 } else
3150 ctxt->eflags &= ~X86_EFLAGS_IF;
3151 break;
3152 case 0xfb: /* sti */
3153 if (emulator_bad_iopl(ctxt, ops)) {
3154 emulate_gp(ctxt, 0);
3155 goto done;
3156 } else {
3157 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3158 ctxt->eflags |= X86_EFLAGS_IF;
3159 }
3160 break;
3161 case 0xfc: /* cld */
3162 ctxt->eflags &= ~EFLG_DF;
3163 break;
3164 case 0xfd: /* std */
3165 ctxt->eflags |= EFLG_DF;
3166 break;
3167 case 0xfe: /* Grp4 */
3168 grp45:
3169 rc = emulate_grp45(ctxt, ops);
3170 if (rc != X86EMUL_CONTINUE)
3171 goto done;
3172 break;
3173 case 0xff: /* Grp5 */
3174 if (c->modrm_reg == 5)
3175 goto jump_far;
3176 goto grp45;
3177 default:
3178 goto cannot_emulate;
3179 }
3181 writeback:
3182 rc = writeback(ctxt, ops);
3183 if (rc != X86EMUL_CONTINUE)
3184 goto done;
3186 /*
3187 * restore dst type in case the decoding will be reused
3188 * (happens for string instructions)
3189 */
3190 c->dst.type = saved_dst_type;
3192 if ((c->d & SrcMask) == SrcSI)
3193 string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
3194 VCPU_REGS_RSI, &c->src);
3196 if ((c->d & DstMask) == DstDI)
3197 string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
3198 &c->dst);
3200 if (c->rep_prefix && (c->d & String)) {
3201 struct read_cache *rc = &ctxt->decode.io_read;
3202 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3203 /*
3204 * Re-enter guest when the pio read ahead buffer is empty or,
3205 * if it is not used, after every 1024 iterations.
3206 */
3207 if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
3208 (rc->end != 0 && rc->end == rc->pos))
3209 ctxt->restart = false;
3210 }
3211 /*
3212 * reset read cache here in case the string instruction is restarted
3213 * without decoding
3214 */
3215 ctxt->decode.mem_read.end = 0;
3216 ctxt->eip = c->eip;
3218 done:
3219 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3221 twobyte_insn:
3222 switch (c->b) {
3223 case 0x01: /* lgdt, lidt, lmsw */
3224 switch (c->modrm_reg) {
3225 u16 size;
3226 unsigned long address;
3228 case 0: /* vmcall */
3229 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3230 goto cannot_emulate;
3232 rc = kvm_fix_hypercall(ctxt->vcpu);
3233 if (rc != X86EMUL_CONTINUE)
3234 goto done;
3236 /* Let the processor re-execute the fixed hypercall */
3237 c->eip = ctxt->eip;
3238 /* Disable writeback. */
3239 c->dst.type = OP_NONE;
3240 break;
3241 case 2: /* lgdt */
3242 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3243 &size, &address, c->op_bytes);
3244 if (rc != X86EMUL_CONTINUE)
3245 goto done;
3246 realmode_lgdt(ctxt->vcpu, size, address);
3247 /* Disable writeback. */
3248 c->dst.type = OP_NONE;
3249 break;
3250 case 3: /* lidt/vmmcall */
3251 if (c->modrm_mod == 3) {
3252 switch (c->modrm_rm) {
3253 case 1:
3254 rc = kvm_fix_hypercall(ctxt->vcpu);
3255 if (rc != X86EMUL_CONTINUE)
3256 goto done;
3257 break;
3258 default:
3259 goto cannot_emulate;
3260 }
3261 } else {
3262 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3263 &size, &address,
3264 c->op_bytes);
3265 if (rc != X86EMUL_CONTINUE)
3266 goto done;
3267 realmode_lidt(ctxt->vcpu, size, address);
3268 }
3269 /* Disable writeback. */
3270 c->dst.type = OP_NONE;
3271 break;
3272 case 4: /* smsw */
3273 c->dst.bytes = 2;
3274 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3275 break;
3276 case 6: /* lmsw */
3277 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3278 (c->src.val & 0x0f), ctxt->vcpu);
3279 c->dst.type = OP_NONE;
3280 break;
3281 case 5: /* not defined */
3282 emulate_ud(ctxt);
3283 goto done;
3284 case 7: /* invlpg*/
3285 emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
3286 /* Disable writeback. */
3287 c->dst.type = OP_NONE;
3288 break;
3289 default:
3290 goto cannot_emulate;
3291 }
3292 break;
3293 case 0x05: /* syscall */
3294 rc = emulate_syscall(ctxt, ops);
3295 if (rc != X86EMUL_CONTINUE)
3296 goto done;
3297 else
3298 goto writeback;
3299 break;
3300 case 0x06:
3301 emulate_clts(ctxt->vcpu);
3302 break;
3303 case 0x09: /* wbinvd */
3304 kvm_emulate_wbinvd(ctxt->vcpu);
3305 break;
3306 case 0x08: /* invd */
3307 case 0x0d: /* GrpP (prefetch) */
3308 case 0x18: /* Grp16 (prefetch/nop) */
3309 break;
3310 case 0x20: /* mov cr, reg */
3311 switch (c->modrm_reg) {
3312 case 1:
3313 case 5 ... 7:
3314 case 9 ... 15:
3315 emulate_ud(ctxt);
3316 goto done;
3317 }
3318 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3319 break;
3320 case 0x21: /* mov from dr to reg */
3321 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3322 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3323 emulate_ud(ctxt);
3324 goto done;
3325 }
3326 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3327 break;
3328 case 0x22: /* mov reg, cr */
3329 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3330 emulate_gp(ctxt, 0);
3331 goto done;
3332 }
3333 c->dst.type = OP_NONE;
3334 break;
3335 case 0x23: /* mov from reg to dr */
3336 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3337 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3338 emulate_ud(ctxt);
3339 goto done;
3340 }
3342 if (ops->set_dr(c->modrm_reg, c->src.val &
3343 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3344 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3345 /* #UD condition is already handled by the code above */
3346 emulate_gp(ctxt, 0);
3347 goto done;
3348 }
3350 c->dst.type = OP_NONE; /* no writeback */
3351 break;
3352 case 0x30:
3353 /* wrmsr */
3354 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3355 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3356 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3357 emulate_gp(ctxt, 0);
3358 goto done;
3359 }
3360 rc = X86EMUL_CONTINUE;
3361 break;
3362 case 0x32:
3363 /* rdmsr */
3364 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3365 emulate_gp(ctxt, 0);
3366 goto done;
3367 } else {
3368 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3369 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3370 }
3371 rc = X86EMUL_CONTINUE;
3372 break;
3373 case 0x34: /* sysenter */
3374 rc = emulate_sysenter(ctxt, ops);
3375 if (rc != X86EMUL_CONTINUE)
3376 goto done;
3377 else
3378 goto writeback;
3379 break;
3380 case 0x35: /* sysexit */
3381 rc = emulate_sysexit(ctxt, ops);
3382 if (rc != X86EMUL_CONTINUE)
3383 goto done;
3384 else
3385 goto writeback;
3386 break;
3387 case 0x40 ... 0x4f: /* cmov */
3388 c->dst.val = c->dst.orig_val = c->src.val;
3389 if (!test_cc(c->b, ctxt->eflags))
3390 c->dst.type = OP_NONE; /* no writeback */
3391 break;
3392 case 0x80 ... 0x8f: /* jnz rel, etc*/
3393 if (test_cc(c->b, ctxt->eflags))
3394 jmp_rel(c, c->src.val);
3395 break;
3396 case 0xa0: /* push fs */
3397 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3398 break;
3399 case 0xa1: /* pop fs */
3400 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3401 if (rc != X86EMUL_CONTINUE)
3402 goto done;
3403 break;
3404 case 0xa3:
3405 bt: /* bt */
3406 c->dst.type = OP_NONE;
3407 /* only subword offset */
3408 c->src.val &= (c->dst.bytes << 3) - 1;
3409 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3410 break;
3411 case 0xa4: /* shld imm8, r, r/m */
3412 case 0xa5: /* shld cl, r, r/m */
3413 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3414 break;
3415 case 0xa8: /* push gs */
3416 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3417 break;
3418 case 0xa9: /* pop gs */
3419 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3420 if (rc != X86EMUL_CONTINUE)
3421 goto done;
3422 break;
3423 case 0xab:
3424 bts: /* bts */
3425 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3426 break;
3427 case 0xac: /* shrd imm8, r, r/m */
3428 case 0xad: /* shrd cl, r, r/m */
3429 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3430 break;
3431 case 0xae: /* clflush */
3432 break;
3433 case 0xb0 ... 0xb1: /* cmpxchg */
3434 /*
3435 * Save real source value, then compare EAX against
3436 * destination.
3437 */
3438 c->src.orig_val = c->src.val;
3439 c->src.val = c->regs[VCPU_REGS_RAX];
3440 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3441 if (ctxt->eflags & EFLG_ZF) {
3442 /* Success: write back to memory. */
3443 c->dst.val = c->src.orig_val;
3444 } else {
3445 /* Failure: write the value we saw to EAX. */
3446 c->dst.type = OP_REG;
3447 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3448 }
3449 break;
3450 case 0xb3:
3451 btr: /* btr */
3452 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3453 break;
3454 case 0xb6 ... 0xb7: /* movzx */
3455 c->dst.bytes = c->op_bytes;
3456 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3457 : (u16) c->src.val;
3458 break;
3459 case 0xba: /* Grp8 */
3460 switch (c->modrm_reg & 3) {
3461 case 0:
3462 goto bt;
3463 case 1:
3464 goto bts;
3465 case 2:
3466 goto btr;
3467 case 3:
3468 goto btc;
3469 }
3470 break;
3471 case 0xbb:
3472 btc: /* btc */
3473 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3474 break;
3475 case 0xbe ... 0xbf: /* movsx */
3476 c->dst.bytes = c->op_bytes;
3477 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3478 (s16) c->src.val;
3479 break;
3480 case 0xc3: /* movnti */
3481 c->dst.bytes = c->op_bytes;
3482 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3483 (u64) c->src.val;
3484 break;
3485 case 0xc7: /* Grp9 (cmpxchg8b) */
3486 rc = emulate_grp9(ctxt, ops);
3487 if (rc != X86EMUL_CONTINUE)
3488 goto done;
3489 break;
3490 default:
3491 goto cannot_emulate;
3492 }
3493 goto writeback;
3495 cannot_emulate:
3496 DPRINTF("Cannot emulate %02x\n", c->b);
3497 return -1;
3498 }