/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define Prefix      (1<<16)	/* Instruction varies with 66/f2/f3 prefix */
#define Sse         (1<<17)	/* SSE Vector instruction */
#define RMExt       (1<<18)	/* Opcode extension in ModRM r/m if mod == 3 */
/* Misc flags */
#define Prot        (1<<21)	/* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22)	/* Vendor specific instruction */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
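
/*
 * For illustration (annotation, not part of the original table machinery):
 * X16(x) expands to sixteen comma-separated copies of its argument, so an
 * opcode-table row can be filled with a single expression, e.g.
 *
 *	X16(D(Undefined))
 *
 * where D() stands for the table-entry constructor used further down in
 * this file.
 */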
struct opcode {
	u32 flags;
	u8 intercept;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {					     \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)	\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)
/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
	do {								\
		switch((_src).bytes) {					\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
		}							\
	} while (0)

#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
	do {								\
		switch((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "q", _ex));	\
			break;						\
		}							\
	} while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type) _x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
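
/*
 * Illustrative use (annotation): fetching an 8-bit displacement in the
 * decoders below looks like
 *
 *	modrm_ea += insn_fetch(s8, 1, c->eip);
 *
 * The macro advances _eip by the fetch size as a side effect, and on a
 * failed fetch jumps to the enclosing function's "done" label with rc
 * already set, which is why each caller declares "int rc" and provides
 * that label.
 */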
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->decode.rep_prefix,
		.modrm_mod  = ctxt->decode.modrm_mod,
		.modrm_reg  = ctxt->decode.modrm_reg,
		.modrm_rm   = ctxt->decode.modrm_rm,
		.src_val    = ctxt->decode.src.val64,
		.src_bytes  = ctxt->decode.src.bytes,
		.dst_bytes  = ctxt->decode.dst.bytes,
		.ad_bytes   = ctxt->decode.ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt->vcpu, &info, stage);
}
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
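
/*
 * Example values (annotation): ad_bytes == 2 gives 0xffff and ad_bytes == 4
 * gives 0xffffffff. With ad_bytes == sizeof(unsigned long) the shift would
 * be undefined in C, which is why the callers below compare ad_bytes
 * against sizeof(unsigned long) before using the mask.
 */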
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}
static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
{
	return address_mask(c, reg);
}
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
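
/*
 * Example (annotation): with ad_bytes == 2 and *reg == 0x10000001, an
 * increment of -2 only touches the low 16 bits, yielding 0x1000ffff --
 * 16-bit SP/SI/DI arithmetic wraps without disturbing the upper register
 * bits, matching real 16-bit addressing-mode behaviour.
 */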
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
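
/*
 * Example (annotation): a descriptor with g == 1 and a raw limit of
 * 0xfffff scales to 0xffffffff (4 GiB - 1); with g == 0 the raw byte
 * limit is returned unchanged.
 */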
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}
static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops,
			     struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return c->seg_override;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	struct decode_cache *c = &ctxt->decode;
	ulong la;

	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
	if (c->ad_bytes != 8)
		la &= (u32)-1;
	*linear = la;
	return X86EMUL_CONTINUE;
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(linear, data, size, ctxt->vcpu,
				   &ctxt->exception);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
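
/*
 * Example (annotation): with highbyte_regs set, modrm_reg == 4 refers to
 * AH rather than SP, so the returned pointer is the second byte of the
 * RAX slot (regs[4 & 3]). This matches the legacy encoding in which regs
 * 4-7 mean AH/CH/DH/BH when no REX prefix is present.
 */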
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
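
/*
 * Example (annotation): for JE (condition 0x4) the switch lands in the
 * ZF case and the low bit is clear, so a set ZF yields 1; JNE (condition
 * 0x5) tests the same flag but the low bit inverts the result.
 */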
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);

	if (c->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		if (c->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = c->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
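
/*
 * Worked example (annotation): in 32-bit code, ModRM byte 0x44 gives
 * mod == 01, reg == 000, rm == 100. rm == 4 forces a SIB fetch; with
 * SIB 0x24 (scale 0, index 4 == none, base 4 == ESP) and an 8-bit
 * displacement of 0x08, the decode above yields an effective address
 * of ESP + 8.
 */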
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct decode_cache *c)
{
	long sv = 0, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}
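
/*
 * Example (annotation): for a 16-bit "bt [mem], reg" with a source value
 * of 20, mask is ~15, sv is 16, so the memory operand moves forward by
 * 16 >> 3 == 2 bytes and the in-word bit offset left in src.val is 4.
 */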
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;

	while (size) {
		int n = min(size, 8u);

		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n,
					&ctxt->exception, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
		size -= n;
	}
	return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, ctxt->ops, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(linear, data, size,
					 &ctxt->exception, ctxt->vcpu);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(linear, orig_data, data,
					   size, &ctxt->exception, ctxt->vcpu);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
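
/*
 * Example (annotation): a "rep insw" with a large RCX refills rc->data
 * with min(bytes left in the guest page, sizeof(rc->data), rep count)
 * worth of entries in one round trip, and subsequent iterations are
 * served from the cache until rc->pos catches up with rc->end.
 */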
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
						ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
			    &ctxt->exception);

	return ret;
}
/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
			     &ctxt->exception);

	return ret;
}
/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;	/* 64b */
	}
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		write_register_operand(&c->dst);
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       c->dst.addr.mem,
					       &c->dst.orig_val,
					       &c->dst.val,
					       c->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     c->dst.addr.mem,
					     &c->dst.val,
					     c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	c->dst.addr.mem.seg = VCPU_SREG_SS;
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}
static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}
*ctxt
,
1487 struct x86_emulate_ops
*ops
, int irq
)
1489 switch(ctxt
->mode
) {
1490 case X86EMUL_MODE_REAL
:
1491 return emulate_int_real(ctxt
, ops
, irq
);
1492 case X86EMUL_MODE_VM86
:
1493 case X86EMUL_MODE_PROT16
:
1494 case X86EMUL_MODE_PROT32
:
1495 case X86EMUL_MODE_PROT64
:
1497 /* Protected mode interrupts unimplemented yet */
1498 return X86EMUL_UNHANDLEABLE
;
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	u8 de = 0;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4: /* mul */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5: /* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6: /* div */
		emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7: /* idiv */
		emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
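
/*
 * Example (annotation): this implements cmpxchg8b semantics -- if EDX:EAX
 * matches the old 64-bit memory value, ZF is set and ECX:EBX is written
 * back through the normal writeback path; otherwise the current memory
 * value is loaded into EDX:EAX and ZF is cleared.
 */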
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned short sel;
	int rc;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, ops, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.val = c->src.val;
	return rc;
}
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
		return emulate_gp(ctxt, 0);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
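/*
 * Advance a string operand (SI or DI) by one element, honouring the
 * direction flag: with EFLG_DF set the address walks downwards.
 */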
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(c, c->regs[reg]);
	op->addr.mem.seg = seg;
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
}
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
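/*
 * Far call: load the new CS first, then push the old CS and old EIP.
 * Each push is committed via writeback() before the next one, so the
 * emulated stack stays consistent across the two stores.
 */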
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	old_eip = c->eip;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	return X86EMUL_CONTINUE;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
}
static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src2.val;
	return em_imul(ctxt);
}
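/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into RDX.
 * ~((x >> (bits - 1)) - 1) evaluates to all-ones when the sign bit is
 * set and to zero otherwise.
 */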
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src.val;
	return X86EMUL_CONTINUE;
}
static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
	return X86EMUL_CONTINUE;
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	ulong linear;

	rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		emulate_invlpg(ctxt->vcpu, linear);
	/* Disable writeback. */
	c->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
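/*
 * Permission-check helpers used by the decode tables below: each either
 * returns X86EMUL_CONTINUE or injects the appropriate fault (#UD, #GP
 * or #DB) via the emulate_* helpers.
 */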
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	if (!valid_cr(c->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int cr = c->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		if (is_long_mode(ctxt->vcpu))
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (is_pae(ctxt->vcpu))
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (is_paging(ctxt->vcpu))
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		u64 cr4;

		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}
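/* DR7.GD (bit 13) turns any debug-register access into a #DB fault. */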
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	u64 dr7;

	ctxt->ops->get_dr(7, &dr7, ctxt->vcpu);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int dr = c->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u64 new_val = c->src.val64;
	int dr = c->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = kvm_register_read(ctxt->vcpu, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt->vcpu))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
	u64 rcx = kvm_register_read(ctxt->vcpu, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt->vcpu)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
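/*
 * I/O port accesses are capped at four bytes and validated against the
 * TSS I/O permission bitmap via emulator_io_permited().
 */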
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.bytes = min(c->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.bytes = min(c->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
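/*
 * Shorthand for building the opcode tables: D() carries decode flags
 * only, I() adds an ->execute callback, DI()/II() add an intercept,
 * and the *IP variants attach a ->check_perm hook as well. The 2bv
 * helpers expand into the usual byte-op/word-or-long pairs.
 */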
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM),			\
		D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock),		\
		D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
	DI(SrcNone | ModRM | Prot | VendorSpecific, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};
static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};
static struct opcode group6[] = {
	DI(ModRM | Prot,        sldt),
	DI(ModRM | Prot,        str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	DI(ModRM | SrcMem | Priv, lgdt), DI(ModRM | SrcMem | Priv, lidt),
	DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
	DI(SrcMem16 | ModRM | Mov | Priv, lmsw),
	DI(SrcMem | ModRM | ByteOp | Priv | NoAccess, invlpg),
}, {
	D(SrcNone | ModRM | Priv | VendorSpecific), EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	DI(SrcNone | ModRM | DstMem | Mov, smsw), N,
	DI(SrcMem16 | ModRM | Mov | Priv, lmsw), EXT(0, group7_rm7),
} };
static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse, em_movdqu),
};
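/*
 * Main one-byte opcode table, indexed by the opcode byte itself;
 * two-byte (0F-prefixed) opcodes live in twobyte_table below.
 */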
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D6ALU(Lock), N, N,
	/* 0x28 - 0x2F */
	D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D6ALU(Lock), N, N,
	/* 0x38 - 0x3F */
	D6ALU(0), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	DI(ImplicitOps | Stack, pushf), DI(ImplicitOps | Stack, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	D2bv(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D2bv(DstAcc | SrcImm),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	D2bv(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), DI(ImplicitOps, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X4(D(SrcImmByte)),
	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bvIP(SrcNone | DstAcc, in,  check_perm_in),
	D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	DI(ImplicitOps | Priv, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	DI(ImplicitOps | Priv, rdmsr),
	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
	D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
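/* Effective immediate size: one byte for ByteOp, else op_bytes capped at 4. */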
static unsigned imm_size(struct decode_cache *c)
{
	unsigned size;

	size = (c->d & ByteOp) ? 1 : c->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
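/*
 * Main decode entry point: consume legacy and REX prefixes, look the
 * opcode up in the tables above (following group, group-dual, RM-ext
 * and SIMD-prefix indirections), then decode the ModRM/SIB bytes and
 * fetch up to two source operands and the destination.
 */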
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	c->eip = ctxt->eip;
	c->fetch.start = c->eip;
	c->fetch.end = c->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(c->fetch.data, insn, insn_len);
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = c->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;

	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];

		if (opcode.flags & RMExt) {
			goffset = c->modrm & 7;
			opcode = opcode.u.group[goffset];
		}

		c->d |= opcode.flags;
	}

	if (c->d & Prefix) {
		if (c->rep_prefix && op_prefix)
			return X86EMUL_UNHANDLEABLE;
		simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
		switch (simd_prefix) {
		case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
		case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
		case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
		case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
		}
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;
	c->check_perm = opcode.check_perm;
	c->intercept = opcode.intercept;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined))
		return -1;

	if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return -1;

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	if (c->d & Sse)
		c->op_bytes = 16;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	memop.addr.mem.seg = seg_override(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;

	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem.ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(ctxt, &c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		c->src = memop;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RSI]);
		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem.ea = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(ctxt, &c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem.ea = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RDI]);
		c->dst.addr.mem.seg = VCPU_SREG_ES;
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((c->b == 0xa6) || (c->b == 0xa7) ||
	     (c->b == 0xae) || (c->b == 0xaf))
	    && (((c->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((c->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}
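/*
 * Main execution entry point: run the privilege/mode/intercept checks,
 * read any memory operands, dispatch either to an ->execute callback or
 * to the opcode switch below, then write the result back and advance RIP.
 */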
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & Sse)
	    && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
		|| !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction specific permission checks */
	if (c->check_perm) {
		rc = c->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = c->eip;
			goto done;
		}
	}

	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = segmented_read(ctxt, c->src.addr.mem,
				    c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, c->src2.addr.mem,
				    &c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, c->dst.addr.mem,
				    &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	add:		/* add */
3562 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
3564 case 0x06: /* push es */
3565 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_ES
);
3567 case 0x07: /* pop es */
3568 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
3572 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
3574 case 0x0e: /* push cs */
3575 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_CS
);
3579 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
3581 case 0x16: /* push ss */
3582 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_SS
);
3584 case 0x17: /* pop ss */
3585 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
3589 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
3591 case 0x1e: /* push ds */
3592 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_DS
);
3594 case 0x1f: /* pop ds */
3595 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
3599 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
3603 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
3607 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
3611 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
3613 case 0x40 ... 0x47: /* inc r16/r32 */
3614 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
3616 case 0x48 ... 0x4f: /* dec r16/r32 */
3617 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
3619 case 0x58 ... 0x5f: /* pop reg */
3621 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
3623 case 0x60: /* pusha */
3624 rc
= emulate_pusha(ctxt
, ops
);
3626 case 0x61: /* popa */
3627 rc
= emulate_popa(ctxt
, ops
);
3629 case 0x63: /* movsxd */
3630 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
3631 goto cannot_emulate
;
3632 c
->dst
.val
= (s32
) c
->src
.val
;
3634 case 0x6c: /* insb */
3635 case 0x6d: /* insw/insd */
3636 c
->src
.val
= c
->regs
[VCPU_REGS_RDX
];
3638 case 0x6e: /* outsb */
3639 case 0x6f: /* outsw/outsd */
3640 c
->dst
.val
= c
->regs
[VCPU_REGS_RDX
];
3643 case 0x70 ... 0x7f: /* jcc (short) */
3644 if (test_cc(c
->b
, ctxt
->eflags
))
3645 jmp_rel(c
, c
->src
.val
);
3647 case 0x80 ... 0x83: /* Grp1 */
3648 switch (c
->modrm_reg
) {
3669 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
3671 case 0x86 ... 0x87: /* xchg */
3673 /* Write back the register source. */
3674 c
->src
.val
= c
->dst
.val
;
3675 write_register_operand(&c
->src
);
3677 * Write back the memory destination with implicit LOCK
3680 c
->dst
.val
= c
->src
.orig_val
;
3683 case 0x8c: /* mov r/m, sreg */
3684 if (c
->modrm_reg
> VCPU_SREG_GS
) {
3685 rc
= emulate_ud(ctxt
);
3688 c
->dst
.val
= ops
->get_segment_selector(c
->modrm_reg
, ctxt
->vcpu
);
3690 case 0x8d: /* lea r16/r32, m */
3691 c
->dst
.val
= c
->src
.addr
.mem
.ea
;
3693 case 0x8e: { /* mov seg, r/m16 */
3698 if (c
->modrm_reg
== VCPU_SREG_CS
||
3699 c
->modrm_reg
> VCPU_SREG_GS
) {
3700 rc
= emulate_ud(ctxt
);
3704 if (c
->modrm_reg
== VCPU_SREG_SS
)
3705 ctxt
->interruptibility
= KVM_X86_SHADOW_INT_MOV_SS
;
3707 rc
= load_segment_descriptor(ctxt
, ops
, sel
, c
->modrm_reg
);
3709 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
3712 case 0x8f: /* pop (sole member of Grp1a) */
3713 rc
= emulate_grp1a(ctxt
, ops
);
3715 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3716 if (c
->dst
.addr
.reg
== &c
->regs
[VCPU_REGS_RAX
])
3719 case 0x98: /* cbw/cwde/cdqe */
3720 switch (c
->op_bytes
) {
3721 case 2: c
->dst
.val
= (s8
)c
->dst
.val
; break;
3722 case 4: c
->dst
.val
= (s16
)c
->dst
.val
; break;
3723 case 8: c
->dst
.val
= (s32
)c
->dst
.val
; break;
3726 case 0x9c: /* pushf */
3727 c
->src
.val
= (unsigned long) ctxt
->eflags
;
3728 emulate_push(ctxt
, ops
);
3730 case 0x9d: /* popf */
3731 c
->dst
.type
= OP_REG
;
3732 c
->dst
.addr
.reg
= &ctxt
->eflags
;
3733 c
->dst
.bytes
= c
->op_bytes
;
3734 rc
= emulate_popf(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
3736 case 0xa6 ... 0xa7: /* cmps */
3737 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
3739 case 0xa8 ... 0xa9: /* test ax, imm */
3741 case 0xae ... 0xaf: /* scas */
3746 case 0xc3: /* ret */
3747 c
->dst
.type
= OP_REG
;
3748 c
->dst
.addr
.reg
= &c
->eip
;
3749 c
->dst
.bytes
= c
->op_bytes
;
3750 goto pop_instruction
;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loop/loopz/loopnz */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3:	/* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		ops->pio_out_emulated(c->src.bytes, c->dst.val,
				      &c->src.val, 1, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = emulate_grp3(ctxt, ops);
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *r = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->decode.mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = c->eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
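/* Handlers for two-byte (0F xx) opcodes not routed through ->execute. */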
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		case 7: /* invlpg */
			rc = em_invlpg(ctxt);
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		break;
	case 0x06:	/* clts */
		emulate_clts(ctxt->vcpu);
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if (ops->set_dr(c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa3:
	bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xab:
	bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0xb3:
	btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
					       (s16) c->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
						  (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}