1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
19 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
25 #include <public/xen.h>
26 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
28 #include <linux/kvm_host.h>
29 #include "kvm_cache_regs.h"
30 #define DPRINTF(x...) do {} while (0)
32 #include <linux/module.h>
33 #include <asm/kvm_emulate.h>
35 #include "mmu.h" /* for is_long_mode() */
38 * Opcode effective-address decode tables.
39 * Note that we only emulate instructions that have at least one memory
40 * operand (excluding implicit stack references). We assume that stack
41 * references and instruction fetches will never occur in special memory
42 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
46 /* Operand sizes: 8-bit operands or specified/overridden size. */
47 #define ByteOp (1<<0) /* 8-bit operands. */
48 /* Destination operand type. */
49 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
50 #define DstReg (2<<1) /* Register operand. */
51 #define DstMem (3<<1) /* Memory operand. */
52 #define DstAcc (4<<1) /* Destination Accumulator */
53 #define DstMask (7<<1)
54 /* Source operand type. */
55 #define SrcNone (0<<4) /* No source operand. */
56 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
57 #define SrcReg (1<<4) /* Register operand. */
58 #define SrcMem (2<<4) /* Memory operand. */
59 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
60 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
61 #define SrcImm (5<<4) /* Immediate operand. */
62 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
63 #define SrcOne (7<<4) /* Implied '1' */
64 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
65 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
66 #define SrcMask (0xf<<4)
67 /* Generic ModRM decode. */
69 /* Destination is only written; never read. */
72 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
73 #define String (1<<12) /* String instruction (rep capable) */
74 #define Stack (1<<13) /* Stack instruction (push/pop) */
75 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
76 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
77 #define GroupMask 0xff /* Group number stored in bits 0:7 */
78 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
79 /* Source 2 operand type */
80 #define Src2None (0<<29)
81 #define Src2CL (1<<29)
82 #define Src2ImmByte (2<<29)
83 #define Src2One (3<<29)
84 #define Src2Imm16 (4<<29)
85 #define Src2Mask (7<<29)
88 Group1_80
, Group1_81
, Group1_82
, Group1_83
,
89 Group1A
, Group3_Byte
, Group3
, Group4
, Group5
, Group7
,
93 static u32 opcode_table
[256] = {
95 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
96 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
97 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
, 0, 0,
99 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
100 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
103 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
104 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
105 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
, 0, 0,
107 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
108 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
109 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
, 0, 0,
111 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
112 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
113 DstAcc
| SrcImmByte
, DstAcc
| SrcImm
, 0, 0,
115 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
116 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
119 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
120 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
123 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
124 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
125 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
128 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
130 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
132 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
133 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
135 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
136 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
138 0, 0, 0, DstReg
| SrcMem32
| ModRM
| Mov
/* movsxd (x86/64) */ ,
141 SrcImm
| Mov
| Stack
, 0, SrcImmByte
| Mov
| Stack
, 0,
142 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
, /* insb, insw/insd */
143 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
, /* outsb, outsw/outsd */
145 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
146 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
148 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
149 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
151 Group
| Group1_80
, Group
| Group1_81
,
152 Group
| Group1_82
, Group
| Group1_83
,
153 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
154 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
156 ByteOp
| DstMem
| SrcReg
| ModRM
| Mov
, DstMem
| SrcReg
| ModRM
| Mov
,
157 ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
158 DstMem
| SrcReg
| ModRM
| Mov
, ModRM
| DstReg
,
159 DstReg
| SrcMem
| ModRM
| Mov
, Group
| Group1A
,
161 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
163 0, 0, SrcImm
| Src2Imm16
, 0,
164 ImplicitOps
| Stack
, ImplicitOps
| Stack
, 0, 0,
166 ByteOp
| DstReg
| SrcMem
| Mov
| MemAbs
, DstReg
| SrcMem
| Mov
| MemAbs
,
167 ByteOp
| DstMem
| SrcReg
| Mov
| MemAbs
, DstMem
| SrcReg
| Mov
| MemAbs
,
168 ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
169 ByteOp
| ImplicitOps
| String
, ImplicitOps
| String
,
171 0, 0, ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
172 ByteOp
| ImplicitOps
| Mov
| String
, ImplicitOps
| Mov
| String
,
173 ByteOp
| ImplicitOps
| String
, ImplicitOps
| String
,
175 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
176 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
177 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
178 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
180 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
181 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
182 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
183 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
185 ByteOp
| DstMem
| SrcImm
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
186 0, ImplicitOps
| Stack
, 0, 0,
187 ByteOp
| DstMem
| SrcImm
| ModRM
| Mov
, DstMem
| SrcImm
| ModRM
| Mov
,
189 0, 0, 0, ImplicitOps
| Stack
,
190 ImplicitOps
, SrcImmByte
, ImplicitOps
, ImplicitOps
,
192 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
193 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
196 0, 0, 0, 0, 0, 0, 0, 0,
199 ByteOp
| SrcImmUByte
, SrcImmUByte
,
200 ByteOp
| SrcImmUByte
, SrcImmUByte
,
202 SrcImm
| Stack
, SrcImm
| ImplicitOps
,
203 SrcImmU
| Src2Imm16
, SrcImmByte
| ImplicitOps
,
204 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
,
205 SrcNone
| ByteOp
| ImplicitOps
, SrcNone
| ImplicitOps
,
208 ImplicitOps
| Priv
, ImplicitOps
, Group
| Group3_Byte
, Group
| Group3
,
210 ImplicitOps
, 0, ImplicitOps
, ImplicitOps
,
211 ImplicitOps
, ImplicitOps
, Group
| Group4
, Group
| Group5
,
214 static u32 twobyte_table
[256] = {
216 0, Group
| GroupDual
| Group7
, 0, 0,
217 0, ImplicitOps
, ImplicitOps
| Priv
, 0,
218 ImplicitOps
| Priv
, ImplicitOps
| Priv
, 0, 0,
219 0, ImplicitOps
| ModRM
, 0, 0,
221 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps
| ModRM
, 0, 0, 0, 0, 0, 0, 0,
223 ModRM
| ImplicitOps
| Priv
, ModRM
| Priv
,
224 ModRM
| ImplicitOps
| Priv
, ModRM
| Priv
,
226 0, 0, 0, 0, 0, 0, 0, 0,
228 ImplicitOps
| Priv
, 0, ImplicitOps
| Priv
, 0,
229 ImplicitOps
, ImplicitOps
| Priv
, 0, 0,
230 0, 0, 0, 0, 0, 0, 0, 0,
232 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
233 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
234 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
235 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
237 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
238 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
239 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
240 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
242 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
246 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
248 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
249 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
251 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
253 0, 0, 0, DstMem
| SrcReg
| ModRM
| BitOp
,
254 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
255 DstMem
| SrcReg
| Src2CL
| ModRM
, 0, 0,
257 0, 0, 0, DstMem
| SrcReg
| ModRM
| BitOp
,
258 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
259 DstMem
| SrcReg
| Src2CL
| ModRM
,
262 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
, 0,
263 DstMem
| SrcReg
| ModRM
| BitOp
,
264 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
265 DstReg
| SrcMem16
| ModRM
| Mov
,
267 0, 0, Group
| Group8
, DstMem
| SrcReg
| ModRM
| BitOp
,
268 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
269 DstReg
| SrcMem16
| ModRM
| Mov
,
271 0, 0, 0, DstMem
| SrcReg
| ModRM
| Mov
,
272 0, 0, 0, Group
| GroupDual
| Group9
,
273 0, 0, 0, 0, 0, 0, 0, 0,
275 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
277 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
282 static u32 group_table
[] = {
284 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
285 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
286 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
287 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
289 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
290 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
291 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
292 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
294 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
295 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
296 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
297 ByteOp
| DstMem
| SrcImm
| ModRM
, ByteOp
| DstMem
| SrcImm
| ModRM
,
299 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
300 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
301 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
302 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
304 DstMem
| SrcNone
| ModRM
| Mov
| Stack
, 0, 0, 0, 0, 0, 0, 0,
306 ByteOp
| SrcImm
| DstMem
| ModRM
, 0,
307 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
310 DstMem
| SrcImm
| ModRM
, 0,
311 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
314 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
317 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
318 SrcMem
| ModRM
| Stack
, 0,
319 SrcMem
| ModRM
| Stack
, 0, SrcMem
| ModRM
| Stack
, 0,
321 0, 0, ModRM
| SrcMem
| Priv
, ModRM
| SrcMem
| Priv
,
322 SrcNone
| ModRM
| DstMem
| Mov
, 0,
323 SrcMem16
| ModRM
| Mov
| Priv
, SrcMem
| ModRM
| ByteOp
| Priv
,
326 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
327 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
329 0, ImplicitOps
| ModRM
, 0, 0, 0, 0, 0, 0,
332 static u32 group2_table
[] = {
334 SrcNone
| ModRM
| Priv
, 0, 0, SrcNone
| ModRM
,
335 SrcNone
| ModRM
| DstMem
| Mov
, 0,
336 SrcMem16
| ModRM
| Mov
, 0,
338 0, 0, 0, 0, 0, 0, 0, 0,
341 /* EFLAGS bit definitions. */
342 #define EFLG_VM (1<<17)
343 #define EFLG_RF (1<<16)
344 #define EFLG_OF (1<<11)
345 #define EFLG_DF (1<<10)
346 #define EFLG_IF (1<<9)
347 #define EFLG_SF (1<<7)
348 #define EFLG_ZF (1<<6)
349 #define EFLG_AF (1<<4)
350 #define EFLG_PF (1<<2)
351 #define EFLG_CF (1<<0)
354 * Instruction emulation:
355 * Most instructions are emulated directly via a fragment of inline assembly
356 * code. This allows us to save/restore EFLAGS and thus very easily pick up
357 * any modified flags.
360 #if defined(CONFIG_X86_64)
361 #define _LO32 "k" /* force 32-bit operand */
362 #define _STK "%%rsp" /* stack pointer */
363 #elif defined(__i386__)
364 #define _LO32 "" /* force 32-bit operand */
365 #define _STK "%%esp" /* stack pointer */
369 * These EFLAGS bits are restored from saved value during emulation, and
370 * any changes are written back to the saved value after emulation.
372 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
374 /* Before executing instruction: restore necessary bits in EFLAGS. */
375 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
376 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
377 "movl %"_sav",%"_LO32 _tmp"; " \
380 "movl %"_msk",%"_LO32 _tmp"; " \
381 "andl %"_LO32 _tmp",("_STK"); " \
383 "notl %"_LO32 _tmp"; " \
384 "andl %"_LO32 _tmp",("_STK"); " \
385 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
387 "orl %"_LO32 _tmp",("_STK"); " \
391 /* After executing instruction: write-back necessary bits in EFLAGS. */
392 #define _POST_EFLAGS(_sav, _msk, _tmp) \
393 /* _sav |= EFLAGS & _msk; */ \
396 "andl %"_msk",%"_LO32 _tmp"; " \
397 "orl %"_LO32 _tmp",%"_sav"; "
405 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
407 __asm__ __volatile__ ( \
408 _PRE_EFLAGS("0", "4", "2") \
409 _op _suffix " %"_x"3,%1; " \
410 _POST_EFLAGS("0", "4", "2") \
411 : "=m" (_eflags), "=m" ((_dst).val), \
413 : _y ((_src).val), "i" (EFLAGS_MASK)); \
417 /* Raw emulation: instruction has two explicit operands. */
418 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
420 unsigned long _tmp; \
422 switch ((_dst).bytes) { \
424 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
427 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
430 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
435 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
437 unsigned long _tmp; \
438 switch ((_dst).bytes) { \
440 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
443 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
444 _wx, _wy, _lx, _ly, _qx, _qy); \
449 /* Source operand is byte-sized and may be restricted to just %cl. */
450 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
451 __emulate_2op(_op, _src, _dst, _eflags, \
452 "b", "c", "b", "c", "b", "c", "b", "c")
454 /* Source operand is byte, word, long or quad sized. */
455 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
456 __emulate_2op(_op, _src, _dst, _eflags, \
457 "b", "q", "w", "r", _LO32, "r", "", "r")
459 /* Source operand is word, long or quad sized. */
460 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
461 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
462 "w", "r", _LO32, "r", "", "r")
464 /* Instruction has three operands and one operand is stored in ECX register */
465 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
467 unsigned long _tmp; \
468 _type _clv = (_cl).val; \
469 _type _srcv = (_src).val; \
470 _type _dstv = (_dst).val; \
472 __asm__ __volatile__ ( \
473 _PRE_EFLAGS("0", "5", "2") \
474 _op _suffix " %4,%1 \n" \
475 _POST_EFLAGS("0", "5", "2") \
476 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
477 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
480 (_cl).val = (unsigned long) _clv; \
481 (_src).val = (unsigned long) _srcv; \
482 (_dst).val = (unsigned long) _dstv; \
485 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
487 switch ((_dst).bytes) { \
489 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
490 "w", unsigned short); \
493 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
494 "l", unsigned int); \
497 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
498 "q", unsigned long)); \
503 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
505 unsigned long _tmp; \
507 __asm__ __volatile__ ( \
508 _PRE_EFLAGS("0", "3", "2") \
509 _op _suffix " %1; " \
510 _POST_EFLAGS("0", "3", "2") \
511 : "=m" (_eflags), "+m" ((_dst).val), \
513 : "i" (EFLAGS_MASK)); \
516 /* Instruction has only one explicit operand (no source operand). */
517 #define emulate_1op(_op, _dst, _eflags) \
519 switch ((_dst).bytes) { \
520 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
521 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
522 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
523 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
527 /* Fetch next part of the instruction being emulated. */
528 #define insn_fetch(_type, _size, _eip) \
529 ({ unsigned long _x; \
530 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
537 static inline unsigned long ad_mask(struct decode_cache
*c
)
539 return (1UL << (c
->ad_bytes
<< 3)) - 1;
542 /* Access/update address held in a register, based on addressing mode. */
543 static inline unsigned long
544 address_mask(struct decode_cache
*c
, unsigned long reg
)
546 if (c
->ad_bytes
== sizeof(unsigned long))
549 return reg
& ad_mask(c
);
/*
 * Compute an effective address: segment base plus the register value
 * truncated to the current address size.
 */
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
559 register_address_increment(struct decode_cache
*c
, unsigned long *reg
, int inc
)
561 if (c
->ad_bytes
== sizeof(unsigned long))
564 *reg
= (*reg
& ~ad_mask(c
)) | ((*reg
+ inc
) & ad_mask(c
));
567 static inline void jmp_rel(struct decode_cache
*c
, int rel
)
569 register_address_increment(c
, &c
->eip
, rel
);
572 static void set_seg_override(struct decode_cache
*c
, int seg
)
574 c
->has_seg_override
= true;
575 c
->seg_override
= seg
;
578 static unsigned long seg_base(struct x86_emulate_ctxt
*ctxt
, int seg
)
580 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& seg
< VCPU_SREG_FS
)
583 return kvm_x86_ops
->get_segment_base(ctxt
->vcpu
, seg
);
586 static unsigned long seg_override_base(struct x86_emulate_ctxt
*ctxt
,
587 struct decode_cache
*c
)
589 if (!c
->has_seg_override
)
592 return seg_base(ctxt
, c
->seg_override
);
595 static unsigned long es_base(struct x86_emulate_ctxt
*ctxt
)
597 return seg_base(ctxt
, VCPU_SREG_ES
);
600 static unsigned long ss_base(struct x86_emulate_ctxt
*ctxt
)
602 return seg_base(ctxt
, VCPU_SREG_SS
);
605 static int do_fetch_insn_byte(struct x86_emulate_ctxt
*ctxt
,
606 struct x86_emulate_ops
*ops
,
607 unsigned long linear
, u8
*dest
)
609 struct fetch_cache
*fc
= &ctxt
->decode
.fetch
;
613 if (linear
< fc
->start
|| linear
>= fc
->end
) {
614 size
= min(15UL, PAGE_SIZE
- offset_in_page(linear
));
615 rc
= ops
->read_std(linear
, fc
->data
, size
, ctxt
->vcpu
);
619 fc
->end
= linear
+ size
;
621 *dest
= fc
->data
[linear
- fc
->start
];
625 static int do_insn_fetch(struct x86_emulate_ctxt
*ctxt
,
626 struct x86_emulate_ops
*ops
,
627 unsigned long eip
, void *dest
, unsigned size
)
631 /* x86 instructions are limited to 15 bytes. */
632 if (eip
+ size
- ctxt
->decode
.eip_orig
> 15)
633 return X86EMUL_UNHANDLEABLE
;
634 eip
+= ctxt
->cs_base
;
636 rc
= do_fetch_insn_byte(ctxt
, ops
, eip
++, dest
++);
644 * Given the 'reg' portion of a ModRM byte, and a register block, return a
645 * pointer into the block that addresses the relevant register.
646 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
648 static void *decode_register(u8 modrm_reg
, unsigned long *regs
,
653 p
= ®s
[modrm_reg
];
654 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
655 p
= (unsigned char *)®s
[modrm_reg
& 3] + 1;
659 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
660 struct x86_emulate_ops
*ops
,
662 u16
*size
, unsigned long *address
, int op_bytes
)
669 rc
= ops
->read_std((unsigned long)ptr
, (unsigned long *)size
, 2,
673 rc
= ops
->read_std((unsigned long)ptr
+ 2, address
, op_bytes
,
678 static int test_cc(unsigned int condition
, unsigned int flags
)
682 switch ((condition
& 15) >> 1) {
684 rc
|= (flags
& EFLG_OF
);
686 case 1: /* b/c/nae */
687 rc
|= (flags
& EFLG_CF
);
690 rc
|= (flags
& EFLG_ZF
);
693 rc
|= (flags
& (EFLG_CF
|EFLG_ZF
));
696 rc
|= (flags
& EFLG_SF
);
699 rc
|= (flags
& EFLG_PF
);
702 rc
|= (flags
& EFLG_ZF
);
705 rc
|= (!(flags
& EFLG_SF
) != !(flags
& EFLG_OF
));
709 /* Odd condition identifiers (lsb == 1) have inverted sense. */
710 return (!!rc
^ (condition
& 1));
713 static void decode_register_operand(struct operand
*op
,
714 struct decode_cache
*c
,
717 unsigned reg
= c
->modrm_reg
;
718 int highbyte_regs
= c
->rex_prefix
== 0;
721 reg
= (c
->b
& 7) | ((c
->rex_prefix
& 1) << 3);
723 if ((c
->d
& ByteOp
) && !inhibit_bytereg
) {
724 op
->ptr
= decode_register(reg
, c
->regs
, highbyte_regs
);
725 op
->val
= *(u8
*)op
->ptr
;
728 op
->ptr
= decode_register(reg
, c
->regs
, 0);
729 op
->bytes
= c
->op_bytes
;
732 op
->val
= *(u16
*)op
->ptr
;
735 op
->val
= *(u32
*)op
->ptr
;
738 op
->val
= *(u64
*) op
->ptr
;
742 op
->orig_val
= op
->val
;
745 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
746 struct x86_emulate_ops
*ops
)
748 struct decode_cache
*c
= &ctxt
->decode
;
750 int index_reg
= 0, base_reg
= 0, scale
;
754 c
->modrm_reg
= (c
->rex_prefix
& 4) << 1; /* REX.R */
755 index_reg
= (c
->rex_prefix
& 2) << 2; /* REX.X */
756 c
->modrm_rm
= base_reg
= (c
->rex_prefix
& 1) << 3; /* REG.B */
759 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
760 c
->modrm_mod
|= (c
->modrm
& 0xc0) >> 6;
761 c
->modrm_reg
|= (c
->modrm
& 0x38) >> 3;
762 c
->modrm_rm
|= (c
->modrm
& 0x07);
766 if (c
->modrm_mod
== 3) {
767 c
->modrm_ptr
= decode_register(c
->modrm_rm
,
768 c
->regs
, c
->d
& ByteOp
);
769 c
->modrm_val
= *(unsigned long *)c
->modrm_ptr
;
773 if (c
->ad_bytes
== 2) {
774 unsigned bx
= c
->regs
[VCPU_REGS_RBX
];
775 unsigned bp
= c
->regs
[VCPU_REGS_RBP
];
776 unsigned si
= c
->regs
[VCPU_REGS_RSI
];
777 unsigned di
= c
->regs
[VCPU_REGS_RDI
];
779 /* 16-bit ModR/M decode. */
780 switch (c
->modrm_mod
) {
782 if (c
->modrm_rm
== 6)
783 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
786 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
789 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
792 switch (c
->modrm_rm
) {
794 c
->modrm_ea
+= bx
+ si
;
797 c
->modrm_ea
+= bx
+ di
;
800 c
->modrm_ea
+= bp
+ si
;
803 c
->modrm_ea
+= bp
+ di
;
812 if (c
->modrm_mod
!= 0)
819 if (c
->modrm_rm
== 2 || c
->modrm_rm
== 3 ||
820 (c
->modrm_rm
== 6 && c
->modrm_mod
!= 0))
821 if (!c
->has_seg_override
)
822 set_seg_override(c
, VCPU_SREG_SS
);
823 c
->modrm_ea
= (u16
)c
->modrm_ea
;
825 /* 32/64-bit ModR/M decode. */
826 if ((c
->modrm_rm
& 7) == 4) {
827 sib
= insn_fetch(u8
, 1, c
->eip
);
828 index_reg
|= (sib
>> 3) & 7;
832 if ((base_reg
& 7) == 5 && c
->modrm_mod
== 0)
833 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
835 c
->modrm_ea
+= c
->regs
[base_reg
];
837 c
->modrm_ea
+= c
->regs
[index_reg
] << scale
;
838 } else if ((c
->modrm_rm
& 7) == 5 && c
->modrm_mod
== 0) {
839 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
842 c
->modrm_ea
+= c
->regs
[c
->modrm_rm
];
843 switch (c
->modrm_mod
) {
845 if (c
->modrm_rm
== 5)
846 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
849 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
852 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
860 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
861 struct x86_emulate_ops
*ops
)
863 struct decode_cache
*c
= &ctxt
->decode
;
866 switch (c
->ad_bytes
) {
868 c
->modrm_ea
= insn_fetch(u16
, 2, c
->eip
);
871 c
->modrm_ea
= insn_fetch(u32
, 4, c
->eip
);
874 c
->modrm_ea
= insn_fetch(u64
, 8, c
->eip
);
882 x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
884 struct decode_cache
*c
= &ctxt
->decode
;
886 int mode
= ctxt
->mode
;
887 int def_op_bytes
, def_ad_bytes
, group
;
889 /* Shadow copy of register state. Committed on successful emulation. */
891 memset(c
, 0, sizeof(struct decode_cache
));
892 c
->eip
= c
->eip_orig
= kvm_rip_read(ctxt
->vcpu
);
893 ctxt
->cs_base
= seg_base(ctxt
, VCPU_SREG_CS
);
894 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
897 case X86EMUL_MODE_REAL
:
898 case X86EMUL_MODE_PROT16
:
899 def_op_bytes
= def_ad_bytes
= 2;
901 case X86EMUL_MODE_PROT32
:
902 def_op_bytes
= def_ad_bytes
= 4;
905 case X86EMUL_MODE_PROT64
:
914 c
->op_bytes
= def_op_bytes
;
915 c
->ad_bytes
= def_ad_bytes
;
917 /* Legacy prefixes. */
919 switch (c
->b
= insn_fetch(u8
, 1, c
->eip
)) {
920 case 0x66: /* operand-size override */
921 /* switch between 2/4 bytes */
922 c
->op_bytes
= def_op_bytes
^ 6;
924 case 0x67: /* address-size override */
925 if (mode
== X86EMUL_MODE_PROT64
)
926 /* switch between 4/8 bytes */
927 c
->ad_bytes
= def_ad_bytes
^ 12;
929 /* switch between 2/4 bytes */
930 c
->ad_bytes
= def_ad_bytes
^ 6;
932 case 0x26: /* ES override */
933 case 0x2e: /* CS override */
934 case 0x36: /* SS override */
935 case 0x3e: /* DS override */
936 set_seg_override(c
, (c
->b
>> 3) & 3);
938 case 0x64: /* FS override */
939 case 0x65: /* GS override */
940 set_seg_override(c
, c
->b
& 7);
942 case 0x40 ... 0x4f: /* REX */
943 if (mode
!= X86EMUL_MODE_PROT64
)
945 c
->rex_prefix
= c
->b
;
947 case 0xf0: /* LOCK */
950 case 0xf2: /* REPNE/REPNZ */
951 c
->rep_prefix
= REPNE_PREFIX
;
953 case 0xf3: /* REP/REPE/REPZ */
954 c
->rep_prefix
= REPE_PREFIX
;
960 /* Any legacy prefix after a REX prefix nullifies its effect. */
969 if (c
->rex_prefix
& 8)
970 c
->op_bytes
= 8; /* REX.W */
972 /* Opcode byte(s). */
973 c
->d
= opcode_table
[c
->b
];
975 /* Two-byte opcode? */
978 c
->b
= insn_fetch(u8
, 1, c
->eip
);
979 c
->d
= twobyte_table
[c
->b
];
984 group
= c
->d
& GroupMask
;
985 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
988 group
= (group
<< 3) + ((c
->modrm
>> 3) & 7);
989 if ((c
->d
& GroupDual
) && (c
->modrm
>> 6) == 3)
990 c
->d
= group2_table
[group
];
992 c
->d
= group_table
[group
];
997 DPRINTF("Cannot emulate %02x\n", c
->b
);
1001 if (mode
== X86EMUL_MODE_PROT64
&& (c
->d
& Stack
))
1004 /* ModRM and SIB bytes. */
1006 rc
= decode_modrm(ctxt
, ops
);
1007 else if (c
->d
& MemAbs
)
1008 rc
= decode_abs(ctxt
, ops
);
1012 if (!c
->has_seg_override
)
1013 set_seg_override(c
, VCPU_SREG_DS
);
1015 if (!(!c
->twobyte
&& c
->b
== 0x8d))
1016 c
->modrm_ea
+= seg_override_base(ctxt
, c
);
1018 if (c
->ad_bytes
!= 8)
1019 c
->modrm_ea
= (u32
)c
->modrm_ea
;
1021 * Decode and fetch the source operand: register, memory
1024 switch (c
->d
& SrcMask
) {
1028 decode_register_operand(&c
->src
, c
, 0);
1037 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 :
1039 /* Don't fetch the address for invlpg: it could be unmapped. */
1040 if (c
->twobyte
&& c
->b
== 0x01 && c
->modrm_reg
== 7)
1044 * For instructions with a ModR/M byte, switch to register
1045 * access if Mod = 3.
1047 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1048 c
->src
.type
= OP_REG
;
1049 c
->src
.val
= c
->modrm_val
;
1050 c
->src
.ptr
= c
->modrm_ptr
;
1053 c
->src
.type
= OP_MEM
;
1057 c
->src
.type
= OP_IMM
;
1058 c
->src
.ptr
= (unsigned long *)c
->eip
;
1059 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1060 if (c
->src
.bytes
== 8)
1062 /* NB. Immediates are sign-extended as necessary. */
1063 switch (c
->src
.bytes
) {
1065 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1068 c
->src
.val
= insn_fetch(s16
, 2, c
->eip
);
1071 c
->src
.val
= insn_fetch(s32
, 4, c
->eip
);
1074 if ((c
->d
& SrcMask
) == SrcImmU
) {
1075 switch (c
->src
.bytes
) {
1080 c
->src
.val
&= 0xffff;
1083 c
->src
.val
&= 0xffffffff;
1090 c
->src
.type
= OP_IMM
;
1091 c
->src
.ptr
= (unsigned long *)c
->eip
;
1093 if ((c
->d
& SrcMask
) == SrcImmByte
)
1094 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1096 c
->src
.val
= insn_fetch(u8
, 1, c
->eip
);
1105 * Decode and fetch the second source operand: register, memory
1108 switch (c
->d
& Src2Mask
) {
1113 c
->src2
.val
= c
->regs
[VCPU_REGS_RCX
] & 0x8;
1116 c
->src2
.type
= OP_IMM
;
1117 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1119 c
->src2
.val
= insn_fetch(u8
, 1, c
->eip
);
1122 c
->src2
.type
= OP_IMM
;
1123 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1125 c
->src2
.val
= insn_fetch(u16
, 2, c
->eip
);
1133 /* Decode and fetch the destination operand: register or memory. */
1134 switch (c
->d
& DstMask
) {
1136 /* Special instructions do their own operand decoding. */
1139 decode_register_operand(&c
->dst
, c
,
1140 c
->twobyte
&& (c
->b
== 0xb6 || c
->b
== 0xb7));
1143 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1144 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1145 c
->dst
.type
= OP_REG
;
1146 c
->dst
.val
= c
->dst
.orig_val
= c
->modrm_val
;
1147 c
->dst
.ptr
= c
->modrm_ptr
;
1150 c
->dst
.type
= OP_MEM
;
1153 c
->dst
.type
= OP_REG
;
1154 c
->dst
.bytes
= c
->op_bytes
;
1155 c
->dst
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1156 switch (c
->op_bytes
) {
1158 c
->dst
.val
= *(u8
*)c
->dst
.ptr
;
1161 c
->dst
.val
= *(u16
*)c
->dst
.ptr
;
1164 c
->dst
.val
= *(u32
*)c
->dst
.ptr
;
1167 c
->dst
.orig_val
= c
->dst
.val
;
1171 if (c
->rip_relative
)
1172 c
->modrm_ea
+= c
->eip
;
1175 return (rc
== X86EMUL_UNHANDLEABLE
) ? -1 : 0;
1178 static inline void emulate_push(struct x86_emulate_ctxt
*ctxt
)
1180 struct decode_cache
*c
= &ctxt
->decode
;
1182 c
->dst
.type
= OP_MEM
;
1183 c
->dst
.bytes
= c
->op_bytes
;
1184 c
->dst
.val
= c
->src
.val
;
1185 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], -c
->op_bytes
);
1186 c
->dst
.ptr
= (void *) register_address(c
, ss_base(ctxt
),
1187 c
->regs
[VCPU_REGS_RSP
]);
1190 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1191 struct x86_emulate_ops
*ops
,
1192 void *dest
, int len
)
1194 struct decode_cache
*c
= &ctxt
->decode
;
1197 rc
= ops
->read_emulated(register_address(c
, ss_base(ctxt
),
1198 c
->regs
[VCPU_REGS_RSP
]),
1199 dest
, len
, ctxt
->vcpu
);
1203 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], len
);
1207 static inline int emulate_grp1a(struct x86_emulate_ctxt
*ctxt
,
1208 struct x86_emulate_ops
*ops
)
1210 struct decode_cache
*c
= &ctxt
->decode
;
1213 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->dst
.bytes
);
1219 static inline void emulate_grp2(struct x86_emulate_ctxt
*ctxt
)
1221 struct decode_cache
*c
= &ctxt
->decode
;
1222 switch (c
->modrm_reg
) {
1224 emulate_2op_SrcB("rol", c
->src
, c
->dst
, ctxt
->eflags
);
1227 emulate_2op_SrcB("ror", c
->src
, c
->dst
, ctxt
->eflags
);
1230 emulate_2op_SrcB("rcl", c
->src
, c
->dst
, ctxt
->eflags
);
1233 emulate_2op_SrcB("rcr", c
->src
, c
->dst
, ctxt
->eflags
);
1235 case 4: /* sal/shl */
1236 case 6: /* sal/shl */
1237 emulate_2op_SrcB("sal", c
->src
, c
->dst
, ctxt
->eflags
);
1240 emulate_2op_SrcB("shr", c
->src
, c
->dst
, ctxt
->eflags
);
1243 emulate_2op_SrcB("sar", c
->src
, c
->dst
, ctxt
->eflags
);
1248 static inline int emulate_grp3(struct x86_emulate_ctxt
*ctxt
,
1249 struct x86_emulate_ops
*ops
)
1251 struct decode_cache
*c
= &ctxt
->decode
;
1254 switch (c
->modrm_reg
) {
1255 case 0 ... 1: /* test */
1256 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1259 c
->dst
.val
= ~c
->dst
.val
;
1262 emulate_1op("neg", c
->dst
, ctxt
->eflags
);
1265 DPRINTF("Cannot emulate %02x\n", c
->b
);
1266 rc
= X86EMUL_UNHANDLEABLE
;
/*
 * emulate_grp45() - opcode groups 4/5 (0xfe/0xff): inc/dec/call/jmp.
 *
 * Dispatches on the ModRM 'reg' field.  For "call near abs" the current
 * EIP is saved (via old_eip, declared in a line not visible here) into
 * c->src.val so the generic push path can push the return address.
 */
1272 static inline int emulate_grp45(struct x86_emulate_ctxt
*ctxt
,
1273 struct x86_emulate_ops
*ops
)
1275 struct decode_cache
*c
= &ctxt
->decode
;
1277 switch (c
->modrm_reg
) {
1279 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1282 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1284 case 2: /* call near abs */ {
/* Jump to the absolute target; stash the return address for the push. */
1287 c
->eip
= c
->src
.val
;
1288 c
->src
.val
= old_eip
;
1292 case 4: /* jmp abs */
1293 c
->eip
= c
->src
.val
;
/*
 * emulate_grp9() - opcode group 9 (0x0f 0xc7): CMPXCHG8B.
 *
 * Reads the 64-bit value at 'memop' and compares it against EDX:EAX.
 * On mismatch: loads the memory value into EDX:EAX and clears ZF.
 * On match: atomically stores ECX:EBX to memory via cmpxchg_emulated()
 * and sets ZF.  NOTE(review): declarations of 'old'/'new'/'rc' and the
 * rc error checks are in lines not visible in this extract.
 */
1302 static inline int emulate_grp9(struct x86_emulate_ctxt
*ctxt
,
1303 struct x86_emulate_ops
*ops
,
1304 unsigned long memop
)
1306 struct decode_cache
*c
= &ctxt
->decode
;
/* Fetch the current 64-bit memory operand. */
1310 rc
= ops
->read_emulated(memop
, &old
, 8, ctxt
->vcpu
);
/* Compare low half against EAX and high half against EDX. */
1314 if (((u32
) (old
>> 0) != (u32
) c
->regs
[VCPU_REGS_RAX
]) ||
1315 ((u32
) (old
>> 32) != (u32
) c
->regs
[VCPU_REGS_RDX
])) {
/* Mismatch: return the memory value in EDX:EAX, clear ZF. */
1317 c
->regs
[VCPU_REGS_RAX
] = (u32
) (old
>> 0);
1318 c
->regs
[VCPU_REGS_RDX
] = (u32
) (old
>> 32);
1319 ctxt
->eflags
&= ~EFLG_ZF
;
/* Match: write ECX:EBX to memory with a real compare-and-exchange. */
1322 new = ((u64
)c
->regs
[VCPU_REGS_RCX
] << 32) |
1323 (u32
) c
->regs
[VCPU_REGS_RBX
];
1325 rc
= ops
->cmpxchg_emulated(memop
, &old
, &new, 8, ctxt
->vcpu
);
1328 ctxt
->eflags
|= EFLG_ZF
;
/*
 * emulate_ret_far() - far return (RETF): pop EIP, then pop CS and
 * reload the CS segment descriptor.
 *
 * With a 4-byte operand size the popped EIP is truncated to 32 bits.
 * NOTE(review): the 'cs' local declaration and the rc error checks
 * between the pops are in lines not visible in this extract.
 */
1333 static int emulate_ret_far(struct x86_emulate_ctxt
*ctxt
,
1334 struct x86_emulate_ops
*ops
)
1336 struct decode_cache
*c
= &ctxt
->decode
;
/* Pop the return instruction pointer. */
1340 rc
= emulate_pop(ctxt
, ops
, &c
->eip
, c
->op_bytes
);
1343 if (c
->op_bytes
== 4)
1344 c
->eip
= (u32
)c
->eip
;
/* Pop the return code-segment selector. */
1345 rc
= emulate_pop(ctxt
, ops
, &cs
, c
->op_bytes
);
/* Reload CS from the popped selector. */
1348 rc
= kvm_load_segment_descriptor(ctxt
->vcpu
, (u16
)cs
, 1, VCPU_SREG_CS
);
/*
 * writeback() - commit the decoded destination operand after execution.
 *
 * Register destinations are stored directly through c->dst.ptr with
 * width-appropriate truncation; memory destinations go through
 * cmpxchg_emulated() (for locked updates) or write_emulated().
 * NOTE(review): the case labels (OP_REG/OP_MEM/...), the lock-prefix
 * test, and the trailing call arguments are in lines not visible here.
 */
1352 static inline int writeback(struct x86_emulate_ctxt
*ctxt
,
1353 struct x86_emulate_ops
*ops
)
1356 struct decode_cache
*c
= &ctxt
->decode
;
1358 switch (c
->dst
.type
) {
1360 /* The 4-byte case *is* correct:
1361 * in 64-bit mode we zero-extend.
1363 switch (c
->dst
.bytes
) {
1365 *(u8
*)c
->dst
.ptr
= (u8
)c
->dst
.val
;
1368 *(u16
*)c
->dst
.ptr
= (u16
)c
->dst
.val
;
1371 *c
->dst
.ptr
= (u32
)c
->dst
.val
;
1372 break;	/* 64b: zero-ext */
1374 *c
->dst
.ptr
= c
->dst
.val
;
/* Memory destination, locked path: use compare-and-exchange. */
1380 rc
= ops
->cmpxchg_emulated(
1381 (unsigned long)c
->dst
.ptr
,
/* Memory destination, unlocked path: plain emulated write. */
1387 rc
= ops
->write_emulated(
1388 (unsigned long)c
->dst
.ptr
,
/*
 * toggle_interruptibility() - update the interrupt-shadow state for
 * STI / MOV SS emulation.
 *
 * Only raises the shadow when the requested mask bit was not already
 * set, so that back-to-back shadow-creating instructions (e.g. sti;sti)
 * do not extend the one-instruction interrupt shadow.
 */
1404 static void toggle_interruptibility(struct x86_emulate_ctxt
*ctxt
, u32 mask
)
1406 u32 int_shadow
= kvm_x86_ops
->get_interrupt_shadow(ctxt
->vcpu
, mask
);
1408 * an sti; sti; sequence only disable interrupts for the first
1409 * instruction. So, if the last instruction, be it emulated or
1410 * not, left the system with the INT_STI flag enabled, it
1411 * means that the last instruction is an sti. We should not
1412 * leave the flag on in this case. The same goes for mov ss
1414 if (!(int_shadow
& mask
))
1415 ctxt
->interruptibility
= mask
;
/*
 * setup_syscalls_segments() - initialize flat CS/SS templates used by
 * the syscall/sysenter/sysexit emulation paths.
 *
 * Both segments are zero-based, 4GB-limit, page-granular flat segments;
 * selector, DPL and the CS 'l' bit are filled in later by the caller
 * according to the target mode.  The current CS is read first so
 * attributes not explicitly overwritten are inherited from it.
 */
1419 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
1420 struct kvm_segment
*cs
, struct kvm_segment
*ss
)
1422 memset(cs
, 0, sizeof(struct kvm_segment
));
1423 kvm_x86_ops
->get_segment(ctxt
->vcpu
, cs
, VCPU_SREG_CS
);
1424 memset(ss
, 0, sizeof(struct kvm_segment
));
1426 cs
->l
= 0; /* will be adjusted later */
1427 cs
->base
= 0; /* flat segment */
1428 cs
->g
= 1; /* 4kb granularity */
1429 cs
->limit
= 0xffffffff; /* 4GB limit */
1430 cs
->type
= 0x0b; /* Read, Execute, Accessed */
1432 cs
->dpl
= 0; /* will be adjusted later */
/* Stack segment mirrors CS but is a writable data segment. */
1437 ss
->base
= 0; /* flat segment */
1438 ss
->limit
= 0xffffffff; /* 4GB limit */
1439 ss
->g
= 1; /* 4kb granularity */
1441 ss
->type
= 0x03; /* Read/Write, Accessed */
1442 ss
->db
= 1; /* 32bit stack segment */
/*
 * emulate_syscall() - emulate the SYSCALL instruction.
 *
 * Refuses in real mode, when CR0.PE is clear, or with a LOCK prefix.
 * Loads CS/SS selectors from MSR_STAR, saves the return RIP in RCX,
 * and in long mode additionally saves RFLAGS in R11 and masks EFLAGS
 * with MSR_SYSCALL_MASK; the target RIP comes from MSR_LSTAR/MSR_CSTAR
 * (long mode) or the upper half of MSR_STAR (legacy mode).
 * NOTE(review): 'msr_data' declaration, the #UD injection for the
 * refused cases, and the return statements are not visible here.
 */
1448 emulate_syscall(struct x86_emulate_ctxt
*ctxt
)
1450 struct decode_cache
*c
= &ctxt
->decode
;
1451 struct kvm_segment cs
, ss
;
1454 /* syscall is not available in real mode */
1455 if (c
->lock_prefix
|| ctxt
->mode
== X86EMUL_MODE_REAL
1456 || !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
))
1459 setup_syscalls_segments(ctxt
, &cs
, &ss
);
/* MSR_STAR[47:32] supplies the kernel CS selector; SS follows it. */
1461 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1463 cs
.selector
= (u16
)(msr_data
& 0xfffc);
1464 ss
.selector
= (u16
)(msr_data
+ 8);
1466 if (is_long_mode(ctxt
->vcpu
)) {
1470 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1471 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
/* SYSCALL stores the return address in RCX. */
1473 c
->regs
[VCPU_REGS_RCX
] = c
->eip
;
1474 if (is_long_mode(ctxt
->vcpu
)) {
1475 #ifdef CONFIG_X86_64
/* Long mode: R11 <- RFLAGS (sans RF); RIP from LSTAR or CSTAR. */
1476 c
->regs
[VCPU_REGS_R11
] = ctxt
->eflags
& ~EFLG_RF
;
1478 kvm_x86_ops
->get_msr(ctxt
->vcpu
,
1479 ctxt
->mode
== X86EMUL_MODE_PROT64
?
1480 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
/* Mask RFLAGS with SFMASK, always clearing RF. */
1483 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_SYSCALL_MASK
, &msr_data
);
1484 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
/* Legacy mode: target EIP is the low 32 bits of MSR_STAR. */
1488 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1489 c
->eip
= (u32
)msr_data
;
1491 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
/*
 * emulate_sysenter() - emulate the SYSENTER instruction.
 *
 * Injects #GP in real mode / with CR0.PE clear, rejects 64-bit mode
 * (untested, #UD), and validates the CS selector in
 * MSR_IA32_SYSENTER_CS per mode.  On success loads flat CS/SS from the
 * MSR selector, clears VM/IF/RF, and fetches the target EIP/ESP from
 * MSR_IA32_SYSENTER_EIP / MSR_IA32_SYSENTER_ESP.
 * NOTE(review): 'msr_data' declaration, the lock-prefix #UD path,
 * return statements and several braces are not visible in this extract.
 */
1498 emulate_sysenter(struct x86_emulate_ctxt
*ctxt
)
1500 struct decode_cache
*c
= &ctxt
->decode
;
1501 struct kvm_segment cs
, ss
;
1504 /* inject #UD if LOCK prefix is used */
1508 /* inject #GP if in real mode or paging is disabled */
1509 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
1510 !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
)) {
1511 kvm_inject_gp(ctxt
->vcpu
, 0);
1515 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1516 * Therefore, we inject an #UD.
1518 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
1521 setup_syscalls_segments(ctxt
, &cs
, &ss
);
/* A null SYSENTER_CS selector is a #GP per the SDM. */
1523 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1524 switch (ctxt
->mode
) {
1525 case X86EMUL_MODE_PROT32
:
1526 if ((msr_data
& 0xfffc) == 0x0) {
1527 kvm_inject_gp(ctxt
->vcpu
, 0);
1531 case X86EMUL_MODE_PROT64
:
1532 if (msr_data
== 0x0) {
1533 kvm_inject_gp(ctxt
->vcpu
, 0);
/* Enter the kernel: flags cleared, CS from MSR, SS = CS + 8, RPL 0. */
1539 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1540 cs
.selector
= (u16
)msr_data
;
1541 cs
.selector
&= ~SELECTOR_RPL_MASK
;
1542 ss
.selector
= cs
.selector
+ 8;
1543 ss
.selector
&= ~SELECTOR_RPL_MASK
;
1544 if (ctxt
->mode
== X86EMUL_MODE_PROT64
1545 || is_long_mode(ctxt
->vcpu
)) {
1550 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1551 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
/* Target instruction pointer and stack pointer come from MSRs. */
1553 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
1556 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
1557 c
->regs
[VCPU_REGS_RSP
] = msr_data
;
/*
 * emulate_sysexit() - emulate the SYSEXIT instruction.
 *
 * Injects #GP in real mode / with CR0.PE clear.  REX.W selects a
 * 64-bit return (usermode = PROT64); user CS/SS selectors are derived
 * from MSR_IA32_SYSENTER_CS (+16/+24 for 32-bit, +32/+40-equivalent
 * CS+8 for 64-bit), forced to RPL 3.  Return EIP comes from RDX and
 * the user stack pointer from RCX.
 * NOTE(review): 'msr_data'/'usermode' declarations, the lock-prefix
 * #UD path, the switch statement header for 'usermode', and return
 * statements are not visible in this extract.
 */
1563 emulate_sysexit(struct x86_emulate_ctxt
*ctxt
)
1565 struct decode_cache
*c
= &ctxt
->decode
;
1566 struct kvm_segment cs
, ss
;
1570 /* inject #UD if LOCK prefix is used */
1574 /* inject #GP if in real mode or paging is disabled */
1575 if (ctxt
->mode
== X86EMUL_MODE_REAL
1576 || !(ctxt
->vcpu
->arch
.cr0
& X86_CR0_PE
)) {
1577 kvm_inject_gp(ctxt
->vcpu
, 0);
1581 setup_syscalls_segments(ctxt
, &cs
, &ss
);
/* REX.W set means we return to 64-bit user mode. */
1583 if ((c
->rex_prefix
& 0x8) != 0x0)
1584 usermode
= X86EMUL_MODE_PROT64
;
1586 usermode
= X86EMUL_MODE_PROT32
;
1590 kvm_x86_ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1592 case X86EMUL_MODE_PROT32
:
1593 cs
.selector
= (u16
)(msr_data
+ 16);
1594 if ((msr_data
& 0xfffc) == 0x0) {
1595 kvm_inject_gp(ctxt
->vcpu
, 0);
1598 ss
.selector
= (u16
)(msr_data
+ 24);
1600 case X86EMUL_MODE_PROT64
:
1601 cs
.selector
= (u16
)(msr_data
+ 32);
1602 if (msr_data
== 0x0) {
1603 kvm_inject_gp(ctxt
->vcpu
, 0);
1606 ss
.selector
= cs
.selector
+ 8;
/* Returning to user mode: force RPL 3 on both selectors. */
1611 cs
.selector
|= SELECTOR_RPL_MASK
;
1612 ss
.selector
|= SELECTOR_RPL_MASK
;
1614 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &cs
, VCPU_SREG_CS
);
1615 kvm_x86_ops
->set_segment(ctxt
->vcpu
, &ss
, VCPU_SREG_SS
);
/* Per the SDM: EIP <- RDX, ESP <- RCX. */
1617 c
->eip
= ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RDX
];
1618 c
->regs
[VCPU_REGS_RSP
] = ctxt
->vcpu
->arch
.regs
[VCPU_REGS_RCX
];
1624 x86_emulate_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1626 unsigned long memop
= 0;
1628 unsigned long saved_eip
= 0;
1629 struct decode_cache
*c
= &ctxt
->decode
;
1634 ctxt
->interruptibility
= 0;
1636 /* Shadow copy of register state. Committed on successful emulation.
1637 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
1641 memcpy(c
->regs
, ctxt
->vcpu
->arch
.regs
, sizeof c
->regs
);
1644 /* Privileged instruction can be executed only in CPL=0 */
1645 if ((c
->d
& Priv
) && kvm_x86_ops
->get_cpl(ctxt
->vcpu
)) {
1646 kvm_inject_gp(ctxt
->vcpu
, 0);
1650 if (((c
->d
& ModRM
) && (c
->modrm_mod
!= 3)) || (c
->d
& MemAbs
))
1651 memop
= c
->modrm_ea
;
1653 if (c
->rep_prefix
&& (c
->d
& String
)) {
1654 /* All REP prefixes have the same first termination condition */
1655 if (c
->regs
[VCPU_REGS_RCX
] == 0) {
1656 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1659 /* The second termination condition only applies for REPE
1660 * and REPNE. Test if the repeat string operation prefix is
1661 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
1662 * corresponding termination condition according to:
1663 * - if REPE/REPZ and ZF = 0 then done
1664 * - if REPNE/REPNZ and ZF = 1 then done
1666 if ((c
->b
== 0xa6) || (c
->b
== 0xa7) ||
1667 (c
->b
== 0xae) || (c
->b
== 0xaf)) {
1668 if ((c
->rep_prefix
== REPE_PREFIX
) &&
1669 ((ctxt
->eflags
& EFLG_ZF
) == 0)) {
1670 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1673 if ((c
->rep_prefix
== REPNE_PREFIX
) &&
1674 ((ctxt
->eflags
& EFLG_ZF
) == EFLG_ZF
)) {
1675 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
1679 c
->regs
[VCPU_REGS_RCX
]--;
1680 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
1683 if (c
->src
.type
== OP_MEM
) {
1684 c
->src
.ptr
= (unsigned long *)memop
;
1686 rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1692 c
->src
.orig_val
= c
->src
.val
;
1695 if ((c
->d
& DstMask
) == ImplicitOps
)
1699 if (c
->dst
.type
== OP_MEM
) {
1700 c
->dst
.ptr
= (unsigned long *)memop
;
1701 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1704 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1706 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1707 (c
->src
.val
& mask
) / 8;
1709 if (!(c
->d
& Mov
) &&
1710 /* optimisation - avoid slow emulated read */
1711 ((rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1713 c
->dst
.bytes
, ctxt
->vcpu
)) != 0))
1716 c
->dst
.orig_val
= c
->dst
.val
;
1726 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
1730 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
1734 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
1738 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
1742 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
1746 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
1750 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
1754 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
1756 case 0x40 ... 0x47: /* inc r16/r32 */
1757 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1759 case 0x48 ... 0x4f: /* dec r16/r32 */
1760 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1762 case 0x50 ... 0x57: /* push reg */
1765 case 0x58 ... 0x5f: /* pop reg */
1767 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
1771 case 0x63: /* movsxd */
1772 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
1773 goto cannot_emulate
;
1774 c
->dst
.val
= (s32
) c
->src
.val
;
1776 case 0x68: /* push imm */
1777 case 0x6a: /* push imm8 */
1780 case 0x6c: /* insb */
1781 case 0x6d: /* insw/insd */
1782 if (kvm_emulate_pio_string(ctxt
->vcpu
, NULL
,
1784 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
1786 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
1787 (ctxt
->eflags
& EFLG_DF
),
1788 register_address(c
, es_base(ctxt
),
1789 c
->regs
[VCPU_REGS_RDI
]),
1791 c
->regs
[VCPU_REGS_RDX
]) == 0) {
1796 case 0x6e: /* outsb */
1797 case 0x6f: /* outsw/outsd */
1798 if (kvm_emulate_pio_string(ctxt
->vcpu
, NULL
,
1800 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
1802 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1,
1803 (ctxt
->eflags
& EFLG_DF
),
1805 seg_override_base(ctxt
, c
),
1806 c
->regs
[VCPU_REGS_RSI
]),
1808 c
->regs
[VCPU_REGS_RDX
]) == 0) {
1813 case 0x70 ... 0x7f: /* jcc (short) */
1814 if (test_cc(c
->b
, ctxt
->eflags
))
1815 jmp_rel(c
, c
->src
.val
);
1817 case 0x80 ... 0x83: /* Grp1 */
1818 switch (c
->modrm_reg
) {
1838 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1840 case 0x86 ... 0x87: /* xchg */
1842 /* Write back the register source. */
1843 switch (c
->dst
.bytes
) {
1845 *(u8
*) c
->src
.ptr
= (u8
) c
->dst
.val
;
1848 *(u16
*) c
->src
.ptr
= (u16
) c
->dst
.val
;
1851 *c
->src
.ptr
= (u32
) c
->dst
.val
;
1852 break; /* 64b reg: zero-extend */
1854 *c
->src
.ptr
= c
->dst
.val
;
1858 * Write back the memory destination with implicit LOCK
1861 c
->dst
.val
= c
->src
.val
;
1864 case 0x88 ... 0x8b: /* mov */
1866 case 0x8c: { /* mov r/m, sreg */
1867 struct kvm_segment segreg
;
1869 if (c
->modrm_reg
<= 5)
1870 kvm_get_segment(ctxt
->vcpu
, &segreg
, c
->modrm_reg
);
1872 printk(KERN_INFO
"0x8c: Invalid segreg in modrm byte 0x%02x\n",
1874 goto cannot_emulate
;
1876 c
->dst
.val
= segreg
.selector
;
1879 case 0x8d: /* lea r16/r32, m */
1880 c
->dst
.val
= c
->modrm_ea
;
1882 case 0x8e: { /* mov seg, r/m16 */
1889 if (c
->modrm_reg
== VCPU_SREG_CS
) {
1890 kvm_queue_exception(ctxt
->vcpu
, UD_VECTOR
);
1894 if (c
->modrm_reg
== VCPU_SREG_SS
)
1895 toggle_interruptibility(ctxt
, X86_SHADOW_INT_MOV_SS
);
1897 if (c
->modrm_reg
<= 5) {
1898 type_bits
= (c
->modrm_reg
== 1) ? 9 : 1;
1899 err
= kvm_load_segment_descriptor(ctxt
->vcpu
, sel
,
1900 type_bits
, c
->modrm_reg
);
1902 printk(KERN_INFO
"Invalid segreg in modrm byte 0x%02x\n",
1904 goto cannot_emulate
;
1908 goto cannot_emulate
;
1910 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
1913 case 0x8f: /* pop (sole member of Grp1a) */
1914 rc
= emulate_grp1a(ctxt
, ops
);
1918 case 0x90: /* nop / xchg r8,rax */
1919 if (!(c
->rex_prefix
& 1)) { /* nop */
1920 c
->dst
.type
= OP_NONE
;
1923 case 0x91 ... 0x97: /* xchg reg,rax */
1924 c
->src
.type
= c
->dst
.type
= OP_REG
;
1925 c
->src
.bytes
= c
->dst
.bytes
= c
->op_bytes
;
1926 c
->src
.ptr
= (unsigned long *) &c
->regs
[VCPU_REGS_RAX
];
1927 c
->src
.val
= *(c
->src
.ptr
);
1929 case 0x9c: /* pushf */
1930 c
->src
.val
= (unsigned long) ctxt
->eflags
;
1933 case 0x9d: /* popf */
1934 c
->dst
.type
= OP_REG
;
1935 c
->dst
.ptr
= (unsigned long *) &ctxt
->eflags
;
1936 c
->dst
.bytes
= c
->op_bytes
;
1937 goto pop_instruction
;
1938 case 0xa0 ... 0xa1: /* mov */
1939 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
1940 c
->dst
.val
= c
->src
.val
;
1942 case 0xa2 ... 0xa3: /* mov */
1943 c
->dst
.val
= (unsigned long)c
->regs
[VCPU_REGS_RAX
];
1945 case 0xa4 ... 0xa5: /* movs */
1946 c
->dst
.type
= OP_MEM
;
1947 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1948 c
->dst
.ptr
= (unsigned long *)register_address(c
,
1950 c
->regs
[VCPU_REGS_RDI
]);
1951 if ((rc
= ops
->read_emulated(register_address(c
,
1952 seg_override_base(ctxt
, c
),
1953 c
->regs
[VCPU_REGS_RSI
]),
1955 c
->dst
.bytes
, ctxt
->vcpu
)) != 0)
1957 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
1958 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1960 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
1961 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1964 case 0xa6 ... 0xa7: /* cmps */
1965 c
->src
.type
= OP_NONE
; /* Disable writeback. */
1966 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1967 c
->src
.ptr
= (unsigned long *)register_address(c
,
1968 seg_override_base(ctxt
, c
),
1969 c
->regs
[VCPU_REGS_RSI
]);
1970 if ((rc
= ops
->read_emulated((unsigned long)c
->src
.ptr
,
1976 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
1977 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1978 c
->dst
.ptr
= (unsigned long *)register_address(c
,
1980 c
->regs
[VCPU_REGS_RDI
]);
1981 if ((rc
= ops
->read_emulated((unsigned long)c
->dst
.ptr
,
1987 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c
->src
.ptr
, c
->dst
.ptr
);
1989 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
1991 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
1992 (ctxt
->eflags
& EFLG_DF
) ? -c
->src
.bytes
1994 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
1995 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
1999 case 0xaa ... 0xab: /* stos */
2000 c
->dst
.type
= OP_MEM
;
2001 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2002 c
->dst
.ptr
= (unsigned long *)register_address(c
,
2004 c
->regs
[VCPU_REGS_RDI
]);
2005 c
->dst
.val
= c
->regs
[VCPU_REGS_RAX
];
2006 register_address_increment(c
, &c
->regs
[VCPU_REGS_RDI
],
2007 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2010 case 0xac ... 0xad: /* lods */
2011 c
->dst
.type
= OP_REG
;
2012 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
2013 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2014 if ((rc
= ops
->read_emulated(register_address(c
,
2015 seg_override_base(ctxt
, c
),
2016 c
->regs
[VCPU_REGS_RSI
]),
2021 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSI
],
2022 (ctxt
->eflags
& EFLG_DF
) ? -c
->dst
.bytes
2025 case 0xae ... 0xaf: /* scas */
2026 DPRINTF("Urk! I don't handle SCAS.\n");
2027 goto cannot_emulate
;
2028 case 0xb0 ... 0xbf: /* mov r, imm */
2033 case 0xc3: /* ret */
2034 c
->dst
.type
= OP_REG
;
2035 c
->dst
.ptr
= &c
->eip
;
2036 c
->dst
.bytes
= c
->op_bytes
;
2037 goto pop_instruction
;
2038 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2040 c
->dst
.val
= c
->src
.val
;
2042 case 0xcb: /* ret far */
2043 rc
= emulate_ret_far(ctxt
, ops
);
2047 case 0xd0 ... 0xd1: /* Grp2 */
2051 case 0xd2 ... 0xd3: /* Grp2 */
2052 c
->src
.val
= c
->regs
[VCPU_REGS_RCX
];
2055 case 0xe4: /* inb */
2060 case 0xe6: /* outb */
2061 case 0xe7: /* out */
2065 case 0xe8: /* call (near) */ {
2066 long int rel
= c
->src
.val
;
2067 c
->src
.val
= (unsigned long) c
->eip
;
2072 case 0xe9: /* jmp rel */
2074 case 0xea: /* jmp far */
2075 if (kvm_load_segment_descriptor(ctxt
->vcpu
, c
->src2
.val
, 9,
2076 VCPU_SREG_CS
) < 0) {
2077 DPRINTF("jmp far: Failed to load CS descriptor\n");
2078 goto cannot_emulate
;
2081 c
->eip
= c
->src
.val
;
2084 jmp
: /* jmp rel short */
2085 jmp_rel(c
, c
->src
.val
);
2086 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2088 case 0xec: /* in al,dx */
2089 case 0xed: /* in (e/r)ax,dx */
2090 port
= c
->regs
[VCPU_REGS_RDX
];
2093 case 0xee: /* out al,dx */
2094 case 0xef: /* out (e/r)ax,dx */
2095 port
= c
->regs
[VCPU_REGS_RDX
];
2097 do_io
: if (kvm_emulate_pio(ctxt
->vcpu
, NULL
, io_dir_in
,
2098 (c
->d
& ByteOp
) ? 1 : c
->op_bytes
,
2101 goto cannot_emulate
;
2104 case 0xf4: /* hlt */
2105 ctxt
->vcpu
->arch
.halt_request
= 1;
2107 case 0xf5: /* cmc */
2108 /* complement carry flag from eflags reg */
2109 ctxt
->eflags
^= EFLG_CF
;
2110 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2112 case 0xf6 ... 0xf7: /* Grp3 */
2113 rc
= emulate_grp3(ctxt
, ops
);
2117 case 0xf8: /* clc */
2118 ctxt
->eflags
&= ~EFLG_CF
;
2119 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2121 case 0xfa: /* cli */
2122 ctxt
->eflags
&= ~X86_EFLAGS_IF
;
2123 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2125 case 0xfb: /* sti */
2126 toggle_interruptibility(ctxt
, X86_SHADOW_INT_STI
);
2127 ctxt
->eflags
|= X86_EFLAGS_IF
;
2128 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2130 case 0xfc: /* cld */
2131 ctxt
->eflags
&= ~EFLG_DF
;
2132 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2134 case 0xfd: /* std */
2135 ctxt
->eflags
|= EFLG_DF
;
2136 c
->dst
.type
= OP_NONE
; /* Disable writeback. */
2138 case 0xfe ... 0xff: /* Grp4/Grp5 */
2139 rc
= emulate_grp45(ctxt
, ops
);
2146 rc
= writeback(ctxt
, ops
);
2150 /* Commit shadow register state. */
2151 memcpy(ctxt
->vcpu
->arch
.regs
, c
->regs
, sizeof c
->regs
);
2152 kvm_rip_write(ctxt
->vcpu
, c
->eip
);
2155 if (rc
== X86EMUL_UNHANDLEABLE
) {
2163 case 0x01: /* lgdt, lidt, lmsw */
2164 switch (c
->modrm_reg
) {
2166 unsigned long address
;
2168 case 0: /* vmcall */
2169 if (c
->modrm_mod
!= 3 || c
->modrm_rm
!= 1)
2170 goto cannot_emulate
;
2172 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2176 /* Let the processor re-execute the fixed hypercall */
2177 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2178 /* Disable writeback. */
2179 c
->dst
.type
= OP_NONE
;
2182 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2183 &size
, &address
, c
->op_bytes
);
2186 realmode_lgdt(ctxt
->vcpu
, size
, address
);
2187 /* Disable writeback. */
2188 c
->dst
.type
= OP_NONE
;
2190 case 3: /* lidt/vmmcall */
2191 if (c
->modrm_mod
== 3) {
2192 switch (c
->modrm_rm
) {
2194 rc
= kvm_fix_hypercall(ctxt
->vcpu
);
2199 goto cannot_emulate
;
2202 rc
= read_descriptor(ctxt
, ops
, c
->src
.ptr
,
2207 realmode_lidt(ctxt
->vcpu
, size
, address
);
2209 /* Disable writeback. */
2210 c
->dst
.type
= OP_NONE
;
2214 c
->dst
.val
= realmode_get_cr(ctxt
->vcpu
, 0);
2217 realmode_lmsw(ctxt
->vcpu
, (u16
)c
->src
.val
,
2219 c
->dst
.type
= OP_NONE
;
2222 emulate_invlpg(ctxt
->vcpu
, memop
);
2223 /* Disable writeback. */
2224 c
->dst
.type
= OP_NONE
;
2227 goto cannot_emulate
;
2230 case 0x05: /* syscall */
2231 if (emulate_syscall(ctxt
) == -1)
2232 goto cannot_emulate
;
2237 emulate_clts(ctxt
->vcpu
);
2238 c
->dst
.type
= OP_NONE
;
2240 case 0x08: /* invd */
2241 case 0x09: /* wbinvd */
2242 case 0x0d: /* GrpP (prefetch) */
2243 case 0x18: /* Grp16 (prefetch/nop) */
2244 c
->dst
.type
= OP_NONE
;
2246 case 0x20: /* mov cr, reg */
2247 if (c
->modrm_mod
!= 3)
2248 goto cannot_emulate
;
2249 c
->regs
[c
->modrm_rm
] =
2250 realmode_get_cr(ctxt
->vcpu
, c
->modrm_reg
);
2251 c
->dst
.type
= OP_NONE
; /* no writeback */
2253 case 0x21: /* mov from dr to reg */
2254 if (c
->modrm_mod
!= 3)
2255 goto cannot_emulate
;
2256 rc
= emulator_get_dr(ctxt
, c
->modrm_reg
, &c
->regs
[c
->modrm_rm
]);
2258 goto cannot_emulate
;
2259 c
->dst
.type
= OP_NONE
; /* no writeback */
2261 case 0x22: /* mov reg, cr */
2262 if (c
->modrm_mod
!= 3)
2263 goto cannot_emulate
;
2264 realmode_set_cr(ctxt
->vcpu
,
2265 c
->modrm_reg
, c
->modrm_val
, &ctxt
->eflags
);
2266 c
->dst
.type
= OP_NONE
;
2268 case 0x23: /* mov from reg to dr */
2269 if (c
->modrm_mod
!= 3)
2270 goto cannot_emulate
;
2271 rc
= emulator_set_dr(ctxt
, c
->modrm_reg
,
2272 c
->regs
[c
->modrm_rm
]);
2274 goto cannot_emulate
;
2275 c
->dst
.type
= OP_NONE
; /* no writeback */
2279 msr_data
= (u32
)c
->regs
[VCPU_REGS_RAX
]
2280 | ((u64
)c
->regs
[VCPU_REGS_RDX
] << 32);
2281 rc
= kvm_set_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], msr_data
);
2283 kvm_inject_gp(ctxt
->vcpu
, 0);
2284 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2286 rc
= X86EMUL_CONTINUE
;
2287 c
->dst
.type
= OP_NONE
;
2291 rc
= kvm_get_msr(ctxt
->vcpu
, c
->regs
[VCPU_REGS_RCX
], &msr_data
);
2293 kvm_inject_gp(ctxt
->vcpu
, 0);
2294 c
->eip
= kvm_rip_read(ctxt
->vcpu
);
2296 c
->regs
[VCPU_REGS_RAX
] = (u32
)msr_data
;
2297 c
->regs
[VCPU_REGS_RDX
] = msr_data
>> 32;
2299 rc
= X86EMUL_CONTINUE
;
2300 c
->dst
.type
= OP_NONE
;
2302 case 0x34: /* sysenter */
2303 if (emulate_sysenter(ctxt
) == -1)
2304 goto cannot_emulate
;
2308 case 0x35: /* sysexit */
2309 if (emulate_sysexit(ctxt
) == -1)
2310 goto cannot_emulate
;
2314 case 0x40 ... 0x4f: /* cmov */
2315 c
->dst
.val
= c
->dst
.orig_val
= c
->src
.val
;
2316 if (!test_cc(c
->b
, ctxt
->eflags
))
2317 c
->dst
.type
= OP_NONE
; /* no writeback */
2319 case 0x80 ... 0x8f: /* jnz rel, etc*/
2320 if (test_cc(c
->b
, ctxt
->eflags
))
2321 jmp_rel(c
, c
->src
.val
);
2322 c
->dst
.type
= OP_NONE
;
2326 c
->dst
.type
= OP_NONE
;
2327 /* only subword offset */
2328 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2329 emulate_2op_SrcV_nobyte("bt", c
->src
, c
->dst
, ctxt
->eflags
);
2331 case 0xa4: /* shld imm8, r, r/m */
2332 case 0xa5: /* shld cl, r, r/m */
2333 emulate_2op_cl("shld", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2337 /* only subword offset */
2338 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2339 emulate_2op_SrcV_nobyte("bts", c
->src
, c
->dst
, ctxt
->eflags
);
2341 case 0xac: /* shrd imm8, r, r/m */
2342 case 0xad: /* shrd cl, r, r/m */
2343 emulate_2op_cl("shrd", c
->src2
, c
->src
, c
->dst
, ctxt
->eflags
);
2345 case 0xae: /* clflush */
2347 case 0xb0 ... 0xb1: /* cmpxchg */
2349 * Save real source value, then compare EAX against
2352 c
->src
.orig_val
= c
->src
.val
;
2353 c
->src
.val
= c
->regs
[VCPU_REGS_RAX
];
2354 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2355 if (ctxt
->eflags
& EFLG_ZF
) {
2356 /* Success: write back to memory. */
2357 c
->dst
.val
= c
->src
.orig_val
;
2359 /* Failure: write the value we saw to EAX. */
2360 c
->dst
.type
= OP_REG
;
2361 c
->dst
.ptr
= (unsigned long *)&c
->regs
[VCPU_REGS_RAX
];
2366 /* only subword offset */
2367 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2368 emulate_2op_SrcV_nobyte("btr", c
->src
, c
->dst
, ctxt
->eflags
);
2370 case 0xb6 ... 0xb7: /* movzx */
2371 c
->dst
.bytes
= c
->op_bytes
;
2372 c
->dst
.val
= (c
->d
& ByteOp
) ? (u8
) c
->src
.val
2375 case 0xba: /* Grp8 */
2376 switch (c
->modrm_reg
& 3) {
2389 /* only subword offset */
2390 c
->src
.val
&= (c
->dst
.bytes
<< 3) - 1;
2391 emulate_2op_SrcV_nobyte("btc", c
->src
, c
->dst
, ctxt
->eflags
);
2393 case 0xbe ... 0xbf: /* movsx */
2394 c
->dst
.bytes
= c
->op_bytes
;
2395 c
->dst
.val
= (c
->d
& ByteOp
) ? (s8
) c
->src
.val
:
2398 case 0xc3: /* movnti */
2399 c
->dst
.bytes
= c
->op_bytes
;
2400 c
->dst
.val
= (c
->op_bytes
== 4) ? (u32
) c
->src
.val
:
2403 case 0xc7: /* Grp9 (cmpxchg8b) */
2404 rc
= emulate_grp9(ctxt
, ops
, memop
);
2407 c
->dst
.type
= OP_NONE
;
2413 DPRINTF("Cannot emulate %02x\n", c
->b
);