/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 *****************************************************************************/
#ifndef __KERNEL__
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
/* Misc flags */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
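/*
 * Worked example (illustrative sketch, not part of the original tables; the
 * helper name below is hypothetical): an opcode_table entry such as the one
 * for 0x00 (ADD r/m8, r8) packs the whole decode recipe for that opcode into
 * a single u32, and the decoder recovers each field with the *Mask constants
 * defined above.
 */
#if 0	/* illustrative only, never compiled */
static void example_decode_entry(void)
{
	u32 d = ByteOp | DstMem | SrcReg | ModRM | Lock;

	BUG_ON(!(d & ByteOp));			/* 8-bit operands */
	BUG_ON((d & DstMask) != DstMem);	/* destination from ModRM r/m */
	BUG_ON((d & SrcMask) != SrcReg);	/* source from ModRM reg field */
	BUG_ON(!(d & ModRM));			/* a ModRM byte follows */
	BUG_ON(!(d & Lock));			/* LOCK prefix is permitted */
}
#endif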
enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	ImplicitOps | SrcMem16 | ModRM, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImmFAddr | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
	/* 0xA8 - 0xAF */
	DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
	ByteOp | DstDI | String, DstDI | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, ByteOp | SrcImm | DstMem | ModRM,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
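/*
 * How the group tables are indexed (illustrative sketch): a Group | GroupN
 * opcode_table entry keeps the group number in its low byte, and the decoder
 * in x86_decode_insn() refines it with ModRM bits 5:3.  For opcode 0x80 with
 * modrm_reg == 7 (CMP r/m8, imm8):
 *
 *	group = (Group | Group1_80) & GroupMask;	// Group1_80
 *	group = (group << 3) + 7;			// row 7 of that group
 *	d = group_table[group];				// ByteOp|DstMem|SrcImm|ModRM
 *
 * GroupDual entries use group2_table instead when ModRM mod == 3.
 */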
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
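/*
 * The _PRE_EFLAGS/_POST_EFLAGS fragments below splice exactly these six
 * arithmetic flags from the saved guest EFLAGS into the host EFLAGS before
 * the emulated instruction executes, then copy any changes back, so an
 * emulated "add" updates the guest's CF/ZF/SF/OF/AF/PF naturally.
 */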
415 /* Before executing instruction: restore necessary bits in EFLAGS. */
416 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
417 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
418 "movl %"_sav",%"_LO32 _tmp"; " \
421 "movl %"_msk",%"_LO32 _tmp"; " \
422 "andl %"_LO32 _tmp",("_STK"); " \
424 "notl %"_LO32 _tmp"; " \
425 "andl %"_LO32 _tmp",("_STK"); " \
426 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
428 "orl %"_LO32 _tmp",("_STK"); " \
432 /* After executing instruction: write-back necessary bits in EFLAGS. */
433 #define _POST_EFLAGS(_sav, _msk, _tmp) \
434 /* _sav |= EFLAGS & _msk; */ \
437 "andl %"_msk",%"_LO32 _tmp"; " \
438 "orl %"_LO32 _tmp",%"_sav"; "
446 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
448 __asm__ __volatile__ ( \
449 _PRE_EFLAGS("0", "4", "2") \
450 _op _suffix " %"_x"3,%1; " \
451 _POST_EFLAGS("0", "4", "2") \
452 : "=m" (_eflags), "=m" ((_dst).val), \
454 : _y ((_src).val), "i" (EFLAGS_MASK)); \
458 /* Raw emulation: instruction has two explicit operands. */
459 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
461 unsigned long _tmp; \
463 switch ((_dst).bytes) { \
465 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
468 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
471 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
476 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
478 unsigned long _tmp; \
479 switch ((_dst).bytes) { \
481 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
484 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
485 _wx, _wy, _lx, _ly, _qx, _qy); \
490 /* Source operand is byte-sized and may be restricted to just %cl. */
491 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
492 __emulate_2op(_op, _src, _dst, _eflags, \
493 "b", "c", "b", "c", "b", "c", "b", "c")
495 /* Source operand is byte, word, long or quad sized. */
496 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
497 __emulate_2op(_op, _src, _dst, _eflags, \
498 "b", "q", "w", "r", _LO32, "r", "", "r")
500 /* Source operand is word, long or quad sized. */
501 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
502 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
503 "w", "r", _LO32, "r", "", "r")
505 /* Instruction has three operands and one operand is stored in ECX register */
506 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
508 unsigned long _tmp; \
509 _type _clv = (_cl).val; \
510 _type _srcv = (_src).val; \
511 _type _dstv = (_dst).val; \
513 __asm__ __volatile__ ( \
514 _PRE_EFLAGS("0", "5", "2") \
515 _op _suffix " %4,%1 \n" \
516 _POST_EFLAGS("0", "5", "2") \
517 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
518 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
521 (_cl).val = (unsigned long) _clv; \
522 (_src).val = (unsigned long) _srcv; \
523 (_dst).val = (unsigned long) _dstv; \
526 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
528 switch ((_dst).bytes) { \
530 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
531 "w", unsigned short); \
534 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
535 "l", unsigned int); \
538 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
539 "q", unsigned long)); \
544 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
546 unsigned long _tmp; \
548 __asm__ __volatile__ ( \
549 _PRE_EFLAGS("0", "3", "2") \
550 _op _suffix " %1; " \
551 _POST_EFLAGS("0", "3", "2") \
552 : "=m" (_eflags), "+m" ((_dst).val), \
554 : "i" (EFLAGS_MASK)); \
557 /* Instruction has only one explicit operand (no source operand). */
558 #define emulate_1op(_op, _dst, _eflags) \
560 switch ((_dst).bytes) { \
561 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
562 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
563 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
564 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
568 /* Fetch next part of the instruction being emulated. */
569 #define insn_fetch(_type, _size, _eip) \
570 ({ unsigned long _x; \
571 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
572 if (rc != X86EMUL_CONTINUE) \
578 #define insn_fetch_arr(_arr, _size, _eip) \
579 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
580 if (rc != X86EMUL_CONTINUE) \
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
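/*
 * For example, ad_mask() yields the address wrap mask for the effective
 * address size: ad_bytes == 2 gives 0xffff and ad_bytes == 4 gives
 * 0xffffffff.  Callers check ad_bytes == sizeof(unsigned long) first, so
 * the shift never reaches the full word width.
 */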
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}
static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}
static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}
static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}
static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}
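/*
 * Note: in 64-bit mode only FS and GS can contribute a non-zero base, which
 * is why seg_base() short-circuits to 0 for ES/CS/SS/DS when ctxt->mode is
 * X86EMUL_MODE_PROT64 (the VCPU_SREG_* values order ES..GS, so those four
 * segments compare below VCPU_SREG_FS).
 */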
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
}
static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}
static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       int err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}
static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}
static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}
688 static int do_fetch_insn_byte(struct x86_emulate_ctxt
*ctxt
,
689 struct x86_emulate_ops
*ops
,
690 unsigned long eip
, u8
*dest
)
692 struct fetch_cache
*fc
= &ctxt
->decode
.fetch
;
696 if (eip
== fc
->end
) {
697 cur_size
= fc
->end
- fc
->start
;
698 size
= min(15UL - cur_size
, PAGE_SIZE
- offset_in_page(eip
));
699 rc
= ops
->fetch(ctxt
->cs_base
+ eip
, fc
->data
+ cur_size
,
700 size
, ctxt
->vcpu
, NULL
);
701 if (rc
!= X86EMUL_CONTINUE
)
705 *dest
= fc
->data
[eip
- fc
->start
];
706 return X86EMUL_CONTINUE
;
709 static int do_insn_fetch(struct x86_emulate_ctxt
*ctxt
,
710 struct x86_emulate_ops
*ops
,
711 unsigned long eip
, void *dest
, unsigned size
)
715 /* x86 instructions are limited to 15 bytes. */
716 if (eip
+ size
- ctxt
->eip
> 15)
717 return X86EMUL_UNHANDLEABLE
;
719 rc
= do_fetch_insn_byte(ctxt
, ops
, eip
++, dest
++);
720 if (rc
!= X86EMUL_CONTINUE
)
723 return X86EMUL_CONTINUE
;
727 * Given the 'reg' portion of a ModRM byte, and a register block, return a
728 * pointer into the block that addresses the relevant register.
729 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
731 static void *decode_register(u8 modrm_reg
, unsigned long *regs
,
736 p
= ®s
[modrm_reg
];
737 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
738 p
= (unsigned char *)®s
[modrm_reg
& 3] + 1;
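/*
 * Example: with highbyte_regs set, ModRM reg values 4-7 name AH/CH/DH/BH,
 * i.e. byte 1 of RAX/RCX/RDX/RBX.  So reg == 4 (AH) resolves to
 * (u8 *)&regs[VCPU_REGS_RAX] + 1, relying on the host being little-endian
 * (always true here, since the emulator runs on x86).
 */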
742 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
743 struct x86_emulate_ops
*ops
,
745 u16
*size
, unsigned long *address
, int op_bytes
)
752 rc
= ops
->read_std((unsigned long)ptr
, (unsigned long *)size
, 2,
754 if (rc
!= X86EMUL_CONTINUE
)
756 rc
= ops
->read_std((unsigned long)ptr
+ 2, address
, op_bytes
,
761 static int test_cc(unsigned int condition
, unsigned int flags
)
765 switch ((condition
& 15) >> 1) {
767 rc
|= (flags
& EFLG_OF
);
769 case 1: /* b/c/nae */
770 rc
|= (flags
& EFLG_CF
);
773 rc
|= (flags
& EFLG_ZF
);
776 rc
|= (flags
& (EFLG_CF
|EFLG_ZF
));
779 rc
|= (flags
& EFLG_SF
);
782 rc
|= (flags
& EFLG_PF
);
785 rc
|= (flags
& EFLG_ZF
);
788 rc
|= (!(flags
& EFLG_SF
) != !(flags
& EFLG_OF
));
792 /* Odd condition identifiers (lsb == 1) have inverted sense. */
793 return (!!rc
^ (condition
& 1));
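/*
 * Worked example: x86 condition codes come in inverted pairs, so bits 3:1
 * pick the predicate and bit 0 flips it.  test_cc(0x4, flags) ("equal")
 * returns 1 when EFLG_ZF is set, while test_cc(0x5, flags) ("not equal")
 * evaluates the same ZF test and is then inverted by the low bit.
 */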
796 static void decode_register_operand(struct operand
*op
,
797 struct decode_cache
*c
,
800 unsigned reg
= c
->modrm_reg
;
801 int highbyte_regs
= c
->rex_prefix
== 0;
804 reg
= (c
->b
& 7) | ((c
->rex_prefix
& 1) << 3);
806 if ((c
->d
& ByteOp
) && !inhibit_bytereg
) {
807 op
->ptr
= decode_register(reg
, c
->regs
, highbyte_regs
);
808 op
->val
= *(u8
*)op
->ptr
;
811 op
->ptr
= decode_register(reg
, c
->regs
, 0);
812 op
->bytes
= c
->op_bytes
;
815 op
->val
= *(u16
*)op
->ptr
;
818 op
->val
= *(u32
*)op
->ptr
;
821 op
->val
= *(u64
*) op
->ptr
;
825 op
->orig_val
= op
->val
;
828 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
829 struct x86_emulate_ops
*ops
)
831 struct decode_cache
*c
= &ctxt
->decode
;
833 int index_reg
= 0, base_reg
= 0, scale
;
834 int rc
= X86EMUL_CONTINUE
;
837 c
->modrm_reg
= (c
->rex_prefix
& 4) << 1; /* REX.R */
838 index_reg
= (c
->rex_prefix
& 2) << 2; /* REX.X */
839 c
->modrm_rm
= base_reg
= (c
->rex_prefix
& 1) << 3; /* REG.B */
842 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
843 c
->modrm_mod
|= (c
->modrm
& 0xc0) >> 6;
844 c
->modrm_reg
|= (c
->modrm
& 0x38) >> 3;
845 c
->modrm_rm
|= (c
->modrm
& 0x07);
849 if (c
->modrm_mod
== 3) {
850 c
->modrm_ptr
= decode_register(c
->modrm_rm
,
851 c
->regs
, c
->d
& ByteOp
);
852 c
->modrm_val
= *(unsigned long *)c
->modrm_ptr
;
856 if (c
->ad_bytes
== 2) {
857 unsigned bx
= c
->regs
[VCPU_REGS_RBX
];
858 unsigned bp
= c
->regs
[VCPU_REGS_RBP
];
859 unsigned si
= c
->regs
[VCPU_REGS_RSI
];
860 unsigned di
= c
->regs
[VCPU_REGS_RDI
];
862 /* 16-bit ModR/M decode. */
863 switch (c
->modrm_mod
) {
865 if (c
->modrm_rm
== 6)
866 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
869 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
872 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
875 switch (c
->modrm_rm
) {
877 c
->modrm_ea
+= bx
+ si
;
880 c
->modrm_ea
+= bx
+ di
;
883 c
->modrm_ea
+= bp
+ si
;
886 c
->modrm_ea
+= bp
+ di
;
895 if (c
->modrm_mod
!= 0)
902 if (c
->modrm_rm
== 2 || c
->modrm_rm
== 3 ||
903 (c
->modrm_rm
== 6 && c
->modrm_mod
!= 0))
904 if (!c
->has_seg_override
)
905 set_seg_override(c
, VCPU_SREG_SS
);
906 c
->modrm_ea
= (u16
)c
->modrm_ea
;
908 /* 32/64-bit ModR/M decode. */
909 if ((c
->modrm_rm
& 7) == 4) {
910 sib
= insn_fetch(u8
, 1, c
->eip
);
911 index_reg
|= (sib
>> 3) & 7;
915 if ((base_reg
& 7) == 5 && c
->modrm_mod
== 0)
916 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
918 c
->modrm_ea
+= c
->regs
[base_reg
];
920 c
->modrm_ea
+= c
->regs
[index_reg
] << scale
;
921 } else if ((c
->modrm_rm
& 7) == 5 && c
->modrm_mod
== 0) {
922 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
925 c
->modrm_ea
+= c
->regs
[c
->modrm_rm
];
926 switch (c
->modrm_mod
) {
928 if (c
->modrm_rm
== 5)
929 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
932 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
935 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
943 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
944 struct x86_emulate_ops
*ops
)
946 struct decode_cache
*c
= &ctxt
->decode
;
947 int rc
= X86EMUL_CONTINUE
;
949 switch (c
->ad_bytes
) {
951 c
->modrm_ea
= insn_fetch(u16
, 2, c
->eip
);
954 c
->modrm_ea
= insn_fetch(u32
, 4, c
->eip
);
957 c
->modrm_ea
= insn_fetch(u64
, 8, c
->eip
);
965 x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
967 struct decode_cache
*c
= &ctxt
->decode
;
968 int rc
= X86EMUL_CONTINUE
;
969 int mode
= ctxt
->mode
;
970 int def_op_bytes
, def_ad_bytes
, group
;
973 /* we cannot decode insn before we complete previous rep insn */
974 WARN_ON(ctxt
->restart
);
977 c
->fetch
.start
= c
->fetch
.end
= c
->eip
;
978 ctxt
->cs_base
= seg_base(ctxt
, ops
, VCPU_SREG_CS
);
981 case X86EMUL_MODE_REAL
:
982 case X86EMUL_MODE_VM86
:
983 case X86EMUL_MODE_PROT16
:
984 def_op_bytes
= def_ad_bytes
= 2;
986 case X86EMUL_MODE_PROT32
:
987 def_op_bytes
= def_ad_bytes
= 4;
990 case X86EMUL_MODE_PROT64
:
999 c
->op_bytes
= def_op_bytes
;
1000 c
->ad_bytes
= def_ad_bytes
;
1002 /* Legacy prefixes. */
1004 switch (c
->b
= insn_fetch(u8
, 1, c
->eip
)) {
1005 case 0x66: /* operand-size override */
1006 /* switch between 2/4 bytes */
1007 c
->op_bytes
= def_op_bytes
^ 6;
1009 case 0x67: /* address-size override */
1010 if (mode
== X86EMUL_MODE_PROT64
)
1011 /* switch between 4/8 bytes */
1012 c
->ad_bytes
= def_ad_bytes
^ 12;
1014 /* switch between 2/4 bytes */
1015 c
->ad_bytes
= def_ad_bytes
^ 6;
1017 case 0x26: /* ES override */
1018 case 0x2e: /* CS override */
1019 case 0x36: /* SS override */
1020 case 0x3e: /* DS override */
1021 set_seg_override(c
, (c
->b
>> 3) & 3);
1023 case 0x64: /* FS override */
1024 case 0x65: /* GS override */
1025 set_seg_override(c
, c
->b
& 7);
1027 case 0x40 ... 0x4f: /* REX */
1028 if (mode
!= X86EMUL_MODE_PROT64
)
1030 c
->rex_prefix
= c
->b
;
1032 case 0xf0: /* LOCK */
1035 case 0xf2: /* REPNE/REPNZ */
1036 c
->rep_prefix
= REPNE_PREFIX
;
1038 case 0xf3: /* REP/REPE/REPZ */
1039 c
->rep_prefix
= REPE_PREFIX
;
1045 /* Any legacy prefix after a REX prefix nullifies its effect. */
1054 if (c
->rex_prefix
& 8)
1055 c
->op_bytes
= 8; /* REX.W */
1057 /* Opcode byte(s). */
1058 c
->d
= opcode_table
[c
->b
];
1060 /* Two-byte opcode? */
1063 c
->b
= insn_fetch(u8
, 1, c
->eip
);
1064 c
->d
= twobyte_table
[c
->b
];
1069 group
= c
->d
& GroupMask
;
1070 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
1073 group
= (group
<< 3) + ((c
->modrm
>> 3) & 7);
1074 if ((c
->d
& GroupDual
) && (c
->modrm
>> 6) == 3)
1075 c
->d
= group2_table
[group
];
1077 c
->d
= group_table
[group
];
1082 DPRINTF("Cannot emulate %02x\n", c
->b
);
1086 if (mode
== X86EMUL_MODE_PROT64
&& (c
->d
& Stack
))
1089 /* ModRM and SIB bytes. */
1091 rc
= decode_modrm(ctxt
, ops
);
1092 else if (c
->d
& MemAbs
)
1093 rc
= decode_abs(ctxt
, ops
);
1094 if (rc
!= X86EMUL_CONTINUE
)
1097 if (!c
->has_seg_override
)
1098 set_seg_override(c
, VCPU_SREG_DS
);
1100 if (!(!c
->twobyte
&& c
->b
== 0x8d))
1101 c
->modrm_ea
+= seg_override_base(ctxt
, ops
, c
);
1103 if (c
->ad_bytes
!= 8)
1104 c
->modrm_ea
= (u32
)c
->modrm_ea
;
1106 if (c
->rip_relative
)
1107 c
->modrm_ea
+= c
->eip
;
1110 * Decode and fetch the source operand: register, memory
1113 switch (c
->d
& SrcMask
) {
1117 decode_register_operand(&c
->src
, c
, 0);
1126 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 :
1128 /* Don't fetch the address for invlpg: it could be unmapped. */
1129 if (c
->twobyte
&& c
->b
== 0x01 && c
->modrm_reg
== 7)
1133 * For instructions with a ModR/M byte, switch to register
1134 * access if Mod = 3.
1136 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1137 c
->src
.type
= OP_REG
;
1138 c
->src
.val
= c
->modrm_val
;
1139 c
->src
.ptr
= c
->modrm_ptr
;
1142 c
->src
.type
= OP_MEM
;
1143 c
->src
.ptr
= (unsigned long *)c
->modrm_ea
;
1148 c
->src
.type
= OP_IMM
;
1149 c
->src
.ptr
= (unsigned long *)c
->eip
;
1150 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1151 if (c
->src
.bytes
== 8)
1153 /* NB. Immediates are sign-extended as necessary. */
1154 switch (c
->src
.bytes
) {
1156 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1159 c
->src
.val
= insn_fetch(s16
, 2, c
->eip
);
1162 c
->src
.val
= insn_fetch(s32
, 4, c
->eip
);
1165 if ((c
->d
& SrcMask
) == SrcImmU
) {
1166 switch (c
->src
.bytes
) {
1171 c
->src
.val
&= 0xffff;
1174 c
->src
.val
&= 0xffffffff;
1181 c
->src
.type
= OP_IMM
;
1182 c
->src
.ptr
= (unsigned long *)c
->eip
;
1184 if ((c
->d
& SrcMask
) == SrcImmByte
)
1185 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1187 c
->src
.val
= insn_fetch(u8
, 1, c
->eip
);
1194 c
->src
.type
= OP_MEM
;
1195 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1196 c
->src
.ptr
= (unsigned long *)
1197 register_address(c
, seg_override_base(ctxt
, ops
, c
),
1198 c
->regs
[VCPU_REGS_RSI
]);
1202 c
->src
.type
= OP_IMM
;
1203 c
->src
.ptr
= (unsigned long *)c
->eip
;
1204 c
->src
.bytes
= c
->op_bytes
+ 2;
1205 insn_fetch_arr(c
->src
.valptr
, c
->src
.bytes
, c
->eip
);
1208 c
->src
.type
= OP_MEM
;
1209 c
->src
.ptr
= (unsigned long *)c
->modrm_ea
;
1210 c
->src
.bytes
= c
->op_bytes
+ 2;
1215 * Decode and fetch the second source operand: register, memory
1218 switch (c
->d
& Src2Mask
) {
1223 c
->src2
.val
= c
->regs
[VCPU_REGS_RCX
] & 0x8;
1226 c
->src2
.type
= OP_IMM
;
1227 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1229 c
->src2
.val
= insn_fetch(u8
, 1, c
->eip
);
1237 /* Decode and fetch the destination operand: register or memory. */
1238 switch (c
->d
& DstMask
) {
1240 /* Special instructions do their own operand decoding. */
1243 decode_register_operand(&c
->dst
, c
,
1244 c
->twobyte
&& (c
->b
== 0xb6 || c
->b
== 0xb7));
1248 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1249 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1250 c
->dst
.type
= OP_REG
;
1251 c
->dst
.val
= c
->dst
.orig_val
= c
->modrm_val
;
1252 c
->dst
.ptr
= c
->modrm_ptr
;
1255 c
->dst
.type
= OP_MEM
;
1256 c
->dst
.ptr
= (unsigned long *)c
->modrm_ea
;
1257 if ((c
->d
& DstMask
) == DstMem64
)
1260 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1263 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1265 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1266 (c
->src
.val
& mask
) / 8;
1270 c
->dst
.type
= OP_REG
;
1271 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1272 c
->dst
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1273 switch (c
->dst
.bytes
) {
1275 c
->dst
.val
= *(u8
*)c
->dst
.ptr
;
1278 c
->dst
.val
= *(u16
*)c
->dst
.ptr
;
1281 c
->dst
.val
= *(u32
*)c
->dst
.ptr
;
1284 c
->dst
.val
= *(u64
*)c
->dst
.ptr
;
1287 c
->dst
.orig_val
= c
->dst
.val
;
1290 c
->dst
.type
= OP_MEM
;
1291 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1292 c
->dst
.ptr
= (unsigned long *)
1293 register_address(c
, es_base(ctxt
, ops
),
1294 c
->regs
[VCPU_REGS_RDI
]);
1300 return (rc
== X86EMUL_UNHANDLEABLE
) ? -1 : 0;
1303 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1304 struct x86_emulate_ops
*ops
,
1305 unsigned long addr
, void *dest
, unsigned size
)
1308 struct read_cache
*mc
= &ctxt
->decode
.mem_read
;
1312 int n
= min(size
, 8u);
1314 if (mc
->pos
< mc
->end
)
1317 rc
= ops
->read_emulated(addr
, mc
->data
+ mc
->end
, n
, &err
,
1319 if (rc
== X86EMUL_PROPAGATE_FAULT
)
1320 emulate_pf(ctxt
, addr
, err
);
1321 if (rc
!= X86EMUL_CONTINUE
)
1326 memcpy(dest
, mc
->data
+ mc
->pos
, n
);
1331 return X86EMUL_CONTINUE
;
1334 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1335 struct x86_emulate_ops
*ops
,
1336 unsigned int size
, unsigned short port
,
1339 struct read_cache
*rc
= &ctxt
->decode
.io_read
;
1341 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1342 struct decode_cache
*c
= &ctxt
->decode
;
1343 unsigned int in_page
, n
;
1344 unsigned int count
= c
->rep_prefix
?
1345 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1;
1346 in_page
= (ctxt
->eflags
& EFLG_DF
) ?
1347 offset_in_page(c
->regs
[VCPU_REGS_RDI
]) :
1348 PAGE_SIZE
- offset_in_page(c
->regs
[VCPU_REGS_RDI
]);
1349 n
= min(min(in_page
, (unsigned int)sizeof(rc
->data
)) / size
,
1353 rc
->pos
= rc
->end
= 0;
1354 if (!ops
->pio_in_emulated(size
, port
, rc
->data
, n
, ctxt
->vcpu
))
1359 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1364 static u32
desc_limit_scaled(struct desc_struct
*desc
)
1366 u32 limit
= get_desc_limit(desc
);
1368 return desc
->g
? (limit
<< 12) | 0xfff : limit
;
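/*
 * Example: a descriptor with g == 1 and a raw limit of 0xfffff scales to
 * 0xffffffff (4 GiB - 1), while g == 0 leaves the byte-granular limit as is.
 */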
1371 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1372 struct x86_emulate_ops
*ops
,
1373 u16 selector
, struct desc_ptr
*dt
)
1375 if (selector
& 1 << 2) {
1376 struct desc_struct desc
;
1377 memset (dt
, 0, sizeof *dt
);
1378 if (!ops
->get_cached_descriptor(&desc
, VCPU_SREG_LDTR
, ctxt
->vcpu
))
1381 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1382 dt
->address
= get_desc_base(&desc
);
1384 ops
->get_gdt(dt
, ctxt
->vcpu
);
1387 /* allowed just for 8 bytes segments */
1388 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1389 struct x86_emulate_ops
*ops
,
1390 u16 selector
, struct desc_struct
*desc
)
1393 u16 index
= selector
>> 3;
1398 get_descriptor_table_ptr(ctxt
, ops
, selector
, &dt
);
1400 if (dt
.size
< index
* 8 + 7) {
1401 emulate_gp(ctxt
, selector
& 0xfffc);
1402 return X86EMUL_PROPAGATE_FAULT
;
1404 addr
= dt
.address
+ index
* 8;
1405 ret
= ops
->read_std(addr
, desc
, sizeof *desc
, ctxt
->vcpu
, &err
);
1406 if (ret
== X86EMUL_PROPAGATE_FAULT
)
1407 emulate_pf(ctxt
, addr
, err
);
1412 /* allowed just for 8 bytes segments */
1413 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1414 struct x86_emulate_ops
*ops
,
1415 u16 selector
, struct desc_struct
*desc
)
1418 u16 index
= selector
>> 3;
1423 get_descriptor_table_ptr(ctxt
, ops
, selector
, &dt
);
1425 if (dt
.size
< index
* 8 + 7) {
1426 emulate_gp(ctxt
, selector
& 0xfffc);
1427 return X86EMUL_PROPAGATE_FAULT
;
1430 addr
= dt
.address
+ index
* 8;
1431 ret
= ops
->write_std(addr
, desc
, sizeof *desc
, ctxt
->vcpu
, &err
);
1432 if (ret
== X86EMUL_PROPAGATE_FAULT
)
1433 emulate_pf(ctxt
, addr
, err
);
1438 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1439 struct x86_emulate_ops
*ops
,
1440 u16 selector
, int seg
)
1442 struct desc_struct seg_desc
;
1444 unsigned err_vec
= GP_VECTOR
;
1446 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1449 memset(&seg_desc
, 0, sizeof seg_desc
);
1451 if ((seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
)
1452 || ctxt
->mode
== X86EMUL_MODE_REAL
) {
1453 /* set real mode segment descriptor */
1454 set_desc_base(&seg_desc
, selector
<< 4);
1455 set_desc_limit(&seg_desc
, 0xffff);
1462 /* NULL selector is not valid for TR, CS and SS */
1463 if ((seg
== VCPU_SREG_CS
|| seg
== VCPU_SREG_SS
|| seg
== VCPU_SREG_TR
)
1467 /* TR should be in GDT only */
1468 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1471 if (null_selector
) /* for NULL selector skip all following checks */
1474 ret
= read_segment_descriptor(ctxt
, ops
, selector
, &seg_desc
);
1475 if (ret
!= X86EMUL_CONTINUE
)
1478 err_code
= selector
& 0xfffc;
1479 err_vec
= GP_VECTOR
;
1481 /* can't load system descriptor into segment selector */
1482 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
)
1486 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1492 cpl
= ops
->cpl(ctxt
->vcpu
);
1497 * segment is not a writable data segment or segment
1498 * selector's RPL != CPL or segment's DPL != CPL
1500 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1504 if (!(seg_desc
.type
& 8))
1507 if (seg_desc
.type
& 4) {
1513 if (rpl
> cpl
|| dpl
!= cpl
)
1516 /* CS(RPL) <- CPL */
1517 selector
= (selector
& 0xfffc) | cpl
;
1520 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1523 case VCPU_SREG_LDTR
:
1524 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1527 default: /* DS, ES, FS, or GS */
1529 * segment is not a data or readable code segment or
1530 * ((segment is a data or nonconforming code segment)
1531 * and (both RPL and CPL > DPL))
1533 if ((seg_desc
.type
& 0xa) == 0x8 ||
1534 (((seg_desc
.type
& 0xc) != 0xc) &&
1535 (rpl
> dpl
&& cpl
> dpl
)))
1541 /* mark segment as accessed */
1543 ret
= write_segment_descriptor(ctxt
, ops
, selector
, &seg_desc
);
1544 if (ret
!= X86EMUL_CONTINUE
)
1548 ops
->set_segment_selector(selector
, seg
, ctxt
->vcpu
);
1549 ops
->set_cached_descriptor(&seg_desc
, seg
, ctxt
->vcpu
);
1550 return X86EMUL_CONTINUE
;
1552 emulate_exception(ctxt
, err_vec
, err_code
, true);
1553 return X86EMUL_PROPAGATE_FAULT
;
1556 static inline int writeback(struct x86_emulate_ctxt
*ctxt
,
1557 struct x86_emulate_ops
*ops
)
1560 struct decode_cache
*c
= &ctxt
->decode
;
1563 switch (c
->dst
.type
) {
1565 /* The 4-byte case *is* correct:
1566 * in 64-bit mode we zero-extend.
1568 switch (c
->dst
.bytes
) {
1570 *(u8
*)c
->dst
.ptr
= (u8
)c
->dst
.val
;
1573 *(u16
*)c
->dst
.ptr
= (u16
)c
->dst
.val
;
1576 *c
->dst
.ptr
= (u32
)c
->dst
.val
;
1577 break; /* 64b: zero-ext */
1579 *c
->dst
.ptr
= c
->dst
.val
;
1585 rc
= ops
->cmpxchg_emulated(
1586 (unsigned long)c
->dst
.ptr
,
1593 rc
= ops
->write_emulated(
1594 (unsigned long)c
->dst
.ptr
,
1599 if (rc
== X86EMUL_PROPAGATE_FAULT
)
1601 (unsigned long)c
->dst
.ptr
, err
);
1602 if (rc
!= X86EMUL_CONTINUE
)
1611 return X86EMUL_CONTINUE
;
1614 static inline void emulate_push(struct x86_emulate_ctxt
*ctxt
,
1615 struct x86_emulate_ops
*ops
)
1617 struct decode_cache
*c
= &ctxt
->decode
;
1619 c
->dst
.type
= OP_MEM
;
1620 c
->dst
.bytes
= c
->op_bytes
;
1621 c
->dst
.val
= c
->src
.val
;
1622 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], -c
->op_bytes
);
1623 c
->dst
.ptr
= (void *) register_address(c
, ss_base(ctxt
, ops
),
1624 c
->regs
[VCPU_REGS_RSP
]);
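/*
 * In other words, emulate_push() only stages the push: it pre-decrements
 * RSP, points dst at SS:RSP and lets the common writeback() path perform
 * the actual memory write.
 */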
1627 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1628 struct x86_emulate_ops
*ops
,
1629 void *dest
, int len
)
1631 struct decode_cache
*c
= &ctxt
->decode
;
1634 rc
= read_emulated(ctxt
, ops
, register_address(c
, ss_base(ctxt
, ops
),
1635 c
->regs
[VCPU_REGS_RSP
]),
1637 if (rc
!= X86EMUL_CONTINUE
)
1640 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], len
);
1644 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1645 struct x86_emulate_ops
*ops
,
1646 void *dest
, int len
)
1649 unsigned long val
, change_mask
;
1650 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
1651 int cpl
= ops
->cpl(ctxt
->vcpu
);
1653 rc
= emulate_pop(ctxt
, ops
, &val
, len
);
1654 if (rc
!= X86EMUL_CONTINUE
)
1657 change_mask
= EFLG_CF
| EFLG_PF
| EFLG_AF
| EFLG_ZF
| EFLG_SF
| EFLG_OF
1658 | EFLG_TF
| EFLG_DF
| EFLG_NT
| EFLG_RF
| EFLG_AC
| EFLG_ID
;
1660 switch(ctxt
->mode
) {
1661 case X86EMUL_MODE_PROT64
:
1662 case X86EMUL_MODE_PROT32
:
1663 case X86EMUL_MODE_PROT16
:
1665 change_mask
|= EFLG_IOPL
;
1667 change_mask
|= EFLG_IF
;
1669 case X86EMUL_MODE_VM86
:
1671 emulate_gp(ctxt
, 0);
1672 return X86EMUL_PROPAGATE_FAULT
;
1674 change_mask
|= EFLG_IF
;
1676 default: /* real mode */
1677 change_mask
|= (EFLG_IOPL
| EFLG_IF
);
1681 *(unsigned long *)dest
=
1682 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1687 static void emulate_push_sreg(struct x86_emulate_ctxt
*ctxt
,
1688 struct x86_emulate_ops
*ops
, int seg
)
1690 struct decode_cache
*c
= &ctxt
->decode
;
1692 c
->src
.val
= ops
->get_segment_selector(seg
, ctxt
->vcpu
);
1694 emulate_push(ctxt
, ops
);
1697 static int emulate_pop_sreg(struct x86_emulate_ctxt
*ctxt
,
1698 struct x86_emulate_ops
*ops
, int seg
)
1700 struct decode_cache
*c
= &ctxt
->decode
;
1701 unsigned long selector
;
1704 rc
= emulate_pop(ctxt
, ops
, &selector
, c
->op_bytes
);
1705 if (rc
!= X86EMUL_CONTINUE
)
1708 rc
= load_segment_descriptor(ctxt
, ops
, (u16
)selector
, seg
);
1712 static int emulate_pusha(struct x86_emulate_ctxt
*ctxt
,
1713 struct x86_emulate_ops
*ops
)
1715 struct decode_cache
*c
= &ctxt
->decode
;
1716 unsigned long old_esp
= c
->regs
[VCPU_REGS_RSP
];
1717 int rc
= X86EMUL_CONTINUE
;
1718 int reg
= VCPU_REGS_RAX
;
1720 while (reg
<= VCPU_REGS_RDI
) {
1721 (reg
== VCPU_REGS_RSP
) ?
1722 (c
->src
.val
= old_esp
) : (c
->src
.val
= c
->regs
[reg
]);
1724 emulate_push(ctxt
, ops
);
1726 rc
= writeback(ctxt
, ops
);
1727 if (rc
!= X86EMUL_CONTINUE
)
1733 /* Disable writeback. */
1734 c
->dst
.type
= OP_NONE
;
1739 static int emulate_popa(struct x86_emulate_ctxt
*ctxt
,
1740 struct x86_emulate_ops
*ops
)
1742 struct decode_cache
*c
= &ctxt
->decode
;
1743 int rc
= X86EMUL_CONTINUE
;
1744 int reg
= VCPU_REGS_RDI
;
1746 while (reg
>= VCPU_REGS_RAX
) {
1747 if (reg
== VCPU_REGS_RSP
) {
1748 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
],
1753 rc
= emulate_pop(ctxt
, ops
, &c
->regs
[reg
], c
->op_bytes
);
1754 if (rc
!= X86EMUL_CONTINUE
)
1761 static inline int emulate_grp1a(struct x86_emulate_ctxt
*ctxt
,
1762 struct x86_emulate_ops
*ops
)
1764 struct decode_cache
*c
= &ctxt
->decode
;
1766 return emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->dst
.bytes
);
1769 static inline void emulate_grp2(struct x86_emulate_ctxt
*ctxt
)
1771 struct decode_cache
*c
= &ctxt
->decode
;
1772 switch (c
->modrm_reg
) {
1774 emulate_2op_SrcB("rol", c
->src
, c
->dst
, ctxt
->eflags
);
1777 emulate_2op_SrcB("ror", c
->src
, c
->dst
, ctxt
->eflags
);
1780 emulate_2op_SrcB("rcl", c
->src
, c
->dst
, ctxt
->eflags
);
1783 emulate_2op_SrcB("rcr", c
->src
, c
->dst
, ctxt
->eflags
);
1785 case 4: /* sal/shl */
1786 case 6: /* sal/shl */
1787 emulate_2op_SrcB("sal", c
->src
, c
->dst
, ctxt
->eflags
);
1790 emulate_2op_SrcB("shr", c
->src
, c
->dst
, ctxt
->eflags
);
1793 emulate_2op_SrcB("sar", c
->src
, c
->dst
, ctxt
->eflags
);
1798 static inline int emulate_grp3(struct x86_emulate_ctxt
*ctxt
,
1799 struct x86_emulate_ops
*ops
)
1801 struct decode_cache
*c
= &ctxt
->decode
;
1803 switch (c
->modrm_reg
) {
1804 case 0 ... 1: /* test */
1805 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1808 c
->dst
.val
= ~c
->dst
.val
;
1811 emulate_1op("neg", c
->dst
, ctxt
->eflags
);
1819 static inline int emulate_grp45(struct x86_emulate_ctxt
*ctxt
,
1820 struct x86_emulate_ops
*ops
)
1822 struct decode_cache
*c
= &ctxt
->decode
;
1824 switch (c
->modrm_reg
) {
1826 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1829 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1831 case 2: /* call near abs */ {
1834 c
->eip
= c
->src
.val
;
1835 c
->src
.val
= old_eip
;
1836 emulate_push(ctxt
, ops
);
1839 case 4: /* jmp abs */
1840 c
->eip
= c
->src
.val
;
1843 emulate_push(ctxt
, ops
);
1846 return X86EMUL_CONTINUE
;
1849 static inline int emulate_grp9(struct x86_emulate_ctxt
*ctxt
,
1850 struct x86_emulate_ops
*ops
)
1852 struct decode_cache
*c
= &ctxt
->decode
;
1853 u64 old
= c
->dst
.orig_val
;
1855 if (((u32
) (old
>> 0) != (u32
) c
->regs
[VCPU_REGS_RAX
]) ||
1856 ((u32
) (old
>> 32) != (u32
) c
->regs
[VCPU_REGS_RDX
])) {
1858 c
->regs
[VCPU_REGS_RAX
] = (u32
) (old
>> 0);
1859 c
->regs
[VCPU_REGS_RDX
] = (u32
) (old
>> 32);
1860 ctxt
->eflags
&= ~EFLG_ZF
;
1862 c
->dst
.val
= ((u64
)c
->regs
[VCPU_REGS_RCX
] << 32) |
1863 (u32
) c
->regs
[VCPU_REGS_RBX
];
1865 ctxt
->eflags
|= EFLG_ZF
;
1867 return X86EMUL_CONTINUE
;
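/*
 * Worked example: emulate_grp9() implements CMPXCHG8B.  If EDX:EAX matches
 * the old 64-bit destination, ECX:EBX is written and ZF is set; otherwise
 * the current value is loaded back into EDX:EAX and ZF is cleared, which is
 * exactly the comparison and assignments above.
 */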
1870 static int emulate_ret_far(struct x86_emulate_ctxt
*ctxt
,
1871 struct x86_emulate_ops
*ops
)
1873 struct decode_cache
*c
= &ctxt
->decode
;
1877 rc
= emulate_pop(ctxt
, ops
, &c
->eip
, c
->op_bytes
);
1878 if (rc
!= X86EMUL_CONTINUE
)
1880 if (c
->op_bytes
== 4)
1881 c
->eip
= (u32
)c
->eip
;
1882 rc
= emulate_pop(ctxt
, ops
, &cs
, c
->op_bytes
);
1883 if (rc
!= X86EMUL_CONTINUE
)
1885 rc
= load_segment_descriptor(ctxt
, ops
, (u16
)cs
, VCPU_SREG_CS
);
1890 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
1891 struct x86_emulate_ops
*ops
, struct desc_struct
*cs
,
1892 struct desc_struct
*ss
)
1894 memset(cs
, 0, sizeof(struct desc_struct
));
1895 ops
->get_cached_descriptor(cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
1896 memset(ss
, 0, sizeof(struct desc_struct
));
1898 cs
->l
= 0; /* will be adjusted later */
1899 set_desc_base(cs
, 0); /* flat segment */
1900 cs
->g
= 1; /* 4kb granularity */
1901 set_desc_limit(cs
, 0xfffff); /* 4GB limit */
1902 cs
->type
= 0x0b; /* Read, Execute, Accessed */
1904 cs
->dpl
= 0; /* will be adjusted later */
1908 set_desc_base(ss
, 0); /* flat segment */
1909 set_desc_limit(ss
, 0xfffff); /* 4GB limit */
1910 ss
->g
= 1; /* 4kb granularity */
1912 ss
->type
= 0x03; /* Read/Write, Accessed */
1913 ss
->d
= 1; /* 32bit stack segment */
1919 emulate_syscall(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1921 struct decode_cache
*c
= &ctxt
->decode
;
1922 struct desc_struct cs
, ss
;
1926 /* syscall is not available in real mode */
1927 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
1928 ctxt
->mode
== X86EMUL_MODE_VM86
) {
1930 return X86EMUL_PROPAGATE_FAULT
;
1933 setup_syscalls_segments(ctxt
, ops
, &cs
, &ss
);
1935 ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1937 cs_sel
= (u16
)(msr_data
& 0xfffc);
1938 ss_sel
= (u16
)(msr_data
+ 8);
1940 if (is_long_mode(ctxt
->vcpu
)) {
1944 ops
->set_cached_descriptor(&cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
1945 ops
->set_segment_selector(cs_sel
, VCPU_SREG_CS
, ctxt
->vcpu
);
1946 ops
->set_cached_descriptor(&ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
1947 ops
->set_segment_selector(ss_sel
, VCPU_SREG_SS
, ctxt
->vcpu
);
1949 c
->regs
[VCPU_REGS_RCX
] = c
->eip
;
1950 if (is_long_mode(ctxt
->vcpu
)) {
1951 #ifdef CONFIG_X86_64
1952 c
->regs
[VCPU_REGS_R11
] = ctxt
->eflags
& ~EFLG_RF
;
1954 ops
->get_msr(ctxt
->vcpu
,
1955 ctxt
->mode
== X86EMUL_MODE_PROT64
?
1956 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
1959 ops
->get_msr(ctxt
->vcpu
, MSR_SYSCALL_MASK
, &msr_data
);
1960 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
1964 ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1965 c
->eip
= (u32
)msr_data
;
1967 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1970 return X86EMUL_CONTINUE
;
1974 emulate_sysenter(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1976 struct decode_cache
*c
= &ctxt
->decode
;
1977 struct desc_struct cs
, ss
;
1981 /* inject #GP if in real mode */
1982 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
1983 emulate_gp(ctxt
, 0);
1984 return X86EMUL_PROPAGATE_FAULT
;
1987 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1988 * Therefore, we inject an #UD.
1990 if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
1992 return X86EMUL_PROPAGATE_FAULT
;
1995 setup_syscalls_segments(ctxt
, ops
, &cs
, &ss
);
1997 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
1998 switch (ctxt
->mode
) {
1999 case X86EMUL_MODE_PROT32
:
2000 if ((msr_data
& 0xfffc) == 0x0) {
2001 emulate_gp(ctxt
, 0);
2002 return X86EMUL_PROPAGATE_FAULT
;
2005 case X86EMUL_MODE_PROT64
:
2006 if (msr_data
== 0x0) {
2007 emulate_gp(ctxt
, 0);
2008 return X86EMUL_PROPAGATE_FAULT
;
2013 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
2014 cs_sel
= (u16
)msr_data
;
2015 cs_sel
&= ~SELECTOR_RPL_MASK
;
2016 ss_sel
= cs_sel
+ 8;
2017 ss_sel
&= ~SELECTOR_RPL_MASK
;
2018 if (ctxt
->mode
== X86EMUL_MODE_PROT64
2019 || is_long_mode(ctxt
->vcpu
)) {
2024 ops
->set_cached_descriptor(&cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2025 ops
->set_segment_selector(cs_sel
, VCPU_SREG_CS
, ctxt
->vcpu
);
2026 ops
->set_cached_descriptor(&ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2027 ops
->set_segment_selector(ss_sel
, VCPU_SREG_SS
, ctxt
->vcpu
);
2029 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
2032 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
2033 c
->regs
[VCPU_REGS_RSP
] = msr_data
;
2035 return X86EMUL_CONTINUE
;
2039 emulate_sysexit(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
2041 struct decode_cache
*c
= &ctxt
->decode
;
2042 struct desc_struct cs
, ss
;
2047 /* inject #GP if in real mode or Virtual 8086 mode */
2048 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2049 ctxt
->mode
== X86EMUL_MODE_VM86
) {
2050 emulate_gp(ctxt
, 0);
2051 return X86EMUL_PROPAGATE_FAULT
;
2054 setup_syscalls_segments(ctxt
, ops
, &cs
, &ss
);
2056 if ((c
->rex_prefix
& 0x8) != 0x0)
2057 usermode
= X86EMUL_MODE_PROT64
;
2059 usermode
= X86EMUL_MODE_PROT32
;
2063 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2065 case X86EMUL_MODE_PROT32
:
2066 cs_sel
= (u16
)(msr_data
+ 16);
2067 if ((msr_data
& 0xfffc) == 0x0) {
2068 emulate_gp(ctxt
, 0);
2069 return X86EMUL_PROPAGATE_FAULT
;
2071 ss_sel
= (u16
)(msr_data
+ 24);
2073 case X86EMUL_MODE_PROT64
:
2074 cs_sel
= (u16
)(msr_data
+ 32);
2075 if (msr_data
== 0x0) {
2076 emulate_gp(ctxt
, 0);
2077 return X86EMUL_PROPAGATE_FAULT
;
2079 ss_sel
= cs_sel
+ 8;
2084 cs_sel
|= SELECTOR_RPL_MASK
;
2085 ss_sel
|= SELECTOR_RPL_MASK
;
2087 ops
->set_cached_descriptor(&cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2088 ops
->set_segment_selector(cs_sel
, VCPU_SREG_CS
, ctxt
->vcpu
);
2089 ops
->set_cached_descriptor(&ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2090 ops
->set_segment_selector(ss_sel
, VCPU_SREG_SS
, ctxt
->vcpu
);
2092 c
->eip
= c
->regs
[VCPU_REGS_RDX
];
2093 c
->regs
[VCPU_REGS_RSP
] = c
->regs
[VCPU_REGS_RCX
];
2095 return X86EMUL_CONTINUE
;
2098 static bool emulator_bad_iopl(struct x86_emulate_ctxt
*ctxt
,
2099 struct x86_emulate_ops
*ops
)
2102 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2104 if (ctxt
->mode
== X86EMUL_MODE_VM86
)
2106 iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
2107 return ops
->cpl(ctxt
->vcpu
) > iopl
;
2110 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt
*ctxt
,
2111 struct x86_emulate_ops
*ops
,
2114 struct desc_struct tr_seg
;
2117 u8 perm
, bit_idx
= port
& 0x7;
2118 unsigned mask
= (1 << len
) - 1;
2120 ops
->get_cached_descriptor(&tr_seg
, VCPU_SREG_TR
, ctxt
->vcpu
);
2123 if (desc_limit_scaled(&tr_seg
) < 103)
2125 r
= ops
->read_std(get_desc_base(&tr_seg
) + 102, &io_bitmap_ptr
, 2,
2127 if (r
!= X86EMUL_CONTINUE
)
2129 if (io_bitmap_ptr
+ port
/8 > desc_limit_scaled(&tr_seg
))
2131 r
= ops
->read_std(get_desc_base(&tr_seg
) + io_bitmap_ptr
+ port
/8,
2132 &perm
, 1, ctxt
->vcpu
, NULL
);
2133 if (r
!= X86EMUL_CONTINUE
)
2135 if ((perm
>> bit_idx
) & mask
)
2140 static bool emulator_io_permited(struct x86_emulate_ctxt
*ctxt
,
2141 struct x86_emulate_ops
*ops
,
2144 if (emulator_bad_iopl(ctxt
, ops
))
2145 if (!emulator_io_port_access_allowed(ctxt
, ops
, port
, len
))
2150 static void save_state_to_tss16(struct x86_emulate_ctxt
*ctxt
,
2151 struct x86_emulate_ops
*ops
,
2152 struct tss_segment_16
*tss
)
2154 struct decode_cache
*c
= &ctxt
->decode
;
2157 tss
->flag
= ctxt
->eflags
;
2158 tss
->ax
= c
->regs
[VCPU_REGS_RAX
];
2159 tss
->cx
= c
->regs
[VCPU_REGS_RCX
];
2160 tss
->dx
= c
->regs
[VCPU_REGS_RDX
];
2161 tss
->bx
= c
->regs
[VCPU_REGS_RBX
];
2162 tss
->sp
= c
->regs
[VCPU_REGS_RSP
];
2163 tss
->bp
= c
->regs
[VCPU_REGS_RBP
];
2164 tss
->si
= c
->regs
[VCPU_REGS_RSI
];
2165 tss
->di
= c
->regs
[VCPU_REGS_RDI
];
2167 tss
->es
= ops
->get_segment_selector(VCPU_SREG_ES
, ctxt
->vcpu
);
2168 tss
->cs
= ops
->get_segment_selector(VCPU_SREG_CS
, ctxt
->vcpu
);
2169 tss
->ss
= ops
->get_segment_selector(VCPU_SREG_SS
, ctxt
->vcpu
);
2170 tss
->ds
= ops
->get_segment_selector(VCPU_SREG_DS
, ctxt
->vcpu
);
2171 tss
->ldt
= ops
->get_segment_selector(VCPU_SREG_LDTR
, ctxt
->vcpu
);
2174 static int load_state_from_tss16(struct x86_emulate_ctxt
*ctxt
,
2175 struct x86_emulate_ops
*ops
,
2176 struct tss_segment_16
*tss
)
2178 struct decode_cache
*c
= &ctxt
->decode
;
2182 ctxt
->eflags
= tss
->flag
| 2;
2183 c
->regs
[VCPU_REGS_RAX
] = tss
->ax
;
2184 c
->regs
[VCPU_REGS_RCX
] = tss
->cx
;
2185 c
->regs
[VCPU_REGS_RDX
] = tss
->dx
;
2186 c
->regs
[VCPU_REGS_RBX
] = tss
->bx
;
2187 c
->regs
[VCPU_REGS_RSP
] = tss
->sp
;
2188 c
->regs
[VCPU_REGS_RBP
] = tss
->bp
;
2189 c
->regs
[VCPU_REGS_RSI
] = tss
->si
;
2190 c
->regs
[VCPU_REGS_RDI
] = tss
->di
;
2193 * SDM says that segment selectors are loaded before segment
2196 ops
->set_segment_selector(tss
->ldt
, VCPU_SREG_LDTR
, ctxt
->vcpu
);
2197 ops
->set_segment_selector(tss
->es
, VCPU_SREG_ES
, ctxt
->vcpu
);
2198 ops
->set_segment_selector(tss
->cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2199 ops
->set_segment_selector(tss
->ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2200 ops
->set_segment_selector(tss
->ds
, VCPU_SREG_DS
, ctxt
->vcpu
);
2203 * Now load segment descriptors. If a fault happens at this stage
2204 * it is handled in the context of the new task
2206 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ldt
, VCPU_SREG_LDTR
);
2207 if (ret
!= X86EMUL_CONTINUE
)
2209 ret
= load_segment_descriptor(ctxt
, ops
, tss
->es
, VCPU_SREG_ES
);
2210 if (ret
!= X86EMUL_CONTINUE
)
2212 ret
= load_segment_descriptor(ctxt
, ops
, tss
->cs
, VCPU_SREG_CS
);
2213 if (ret
!= X86EMUL_CONTINUE
)
2215 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ss
, VCPU_SREG_SS
);
2216 if (ret
!= X86EMUL_CONTINUE
)
2218 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ds
, VCPU_SREG_DS
);
2219 if (ret
!= X86EMUL_CONTINUE
)
2222 return X86EMUL_CONTINUE
;
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
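
/*
 * The 32-bit TSS additionally carries CR3, EIP, FS, GS and the LDT
 * selector, so the load path below restores the paging state first and
 * can fail with #GP if the saved CR3 is rejected.
 */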
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load the segment descriptors. If a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
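
/*
 * Common task-switch driver: validate the new TSS descriptor (privilege
 * and limit checks), manage the busy bit and the NT/back-link handling
 * depending on whether the switch came from JMP, CALL, IRET or a gate,
 * dispatch to the 16-bit or 32-bit helper above, then set CR0.TS and the
 * new task register. A pending error code is pushed onto the new task's
 * stack when the caller asks for it.
 */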
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
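
/*
 * Advance (or rewind, when EFLAGS.DF is set) the index register used by a
 * string instruction and refresh the operand pointer so the next iteration
 * of a REP-prefixed instruction addresses the following element.
 */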
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
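
/*
 * Main execution entry point after decode: check instruction legality
 * (64-bit availability, LOCK and privilege constraints), evaluate the REP
 * termination conditions for string instructions, fetch any memory source
 * and destination operands, dispatch on the opcode, and finally write the
 * result back and advance the reported instruction pointer.
 */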
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			ctxt->eip = c->eip;
			goto done;
		}
		/* The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
		 * corresponding termination condition according to:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}

	if (c->src.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
				   c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val = c->src.val;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
				   &c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
2630 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2632 case 0x06: /* push es */
2633 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2635 case 0x07: /* pop es */
2636 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2637 if (rc
!= X86EMUL_CONTINUE
)
2642 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2644 case 0x0e: /* push cs */
2645 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_CS
);
2649 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2651 case 0x16: /* push ss */
2652 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2654 case 0x17: /* pop ss */
2655 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2656 if (rc
!= X86EMUL_CONTINUE
)
2661 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2663 case 0x1e: /* push ds */
2664 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2666 case 0x1f: /* pop ds */
2667 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2668 if (rc
!= X86EMUL_CONTINUE
)
2673 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2677 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2681 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2685 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2687 case 0x40 ... 0x47: /* inc r16/r32 */
2688 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
2690 case 0x48 ... 0x4f: /* dec r16/r32 */
2691 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
2693 case 0x50 ... 0x57: /* push reg */
2694 emulate_push(ctxt
, ops
);
2696 case 0x58 ... 0x5f: /* pop reg */
2698 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2699 if (rc
!= X86EMUL_CONTINUE
)
2702 case 0x60: /* pusha */
2703 rc
= emulate_pusha(ctxt
, ops
);
2704 if (rc
!= X86EMUL_CONTINUE
)
2707 case 0x61: /* popa */
2708 rc
= emulate_popa(ctxt
, ops
);
2709 if (rc
!= X86EMUL_CONTINUE
)
2712 case 0x63: /* movsxd */
2713 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
2714 goto cannot_emulate
;
2715 c
->dst
.val
= (s32
) c
->src
.val
;
2717 case 0x68: /* push imm */
2718 case 0x6a: /* push imm8 */
2719 emulate_push(ctxt
, ops
);
2721 case 0x6c: /* insb */
2722 case 0x6d: /* insw/insd */
2723 c
->dst
.bytes
= min(c
->dst
.bytes
, 4u);
2724 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2726 emulate_gp(ctxt
, 0);
2729 if (!pio_in_emulated(ctxt
, ops
, c
->dst
.bytes
,
2730 c
->regs
[VCPU_REGS_RDX
], &c
->dst
.val
))
2731 goto done
; /* IO is needed, skip writeback */
2733 case 0x6e: /* outsb */
2734 case 0x6f: /* outsw/outsd */
2735 c
->src
.bytes
= min(c
->src
.bytes
, 4u);
2736 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2738 emulate_gp(ctxt
, 0);
2741 ops
->pio_out_emulated(c
->src
.bytes
, c
->regs
[VCPU_REGS_RDX
],
2742 &c
->src
.val
, 1, ctxt
->vcpu
);
2744 c
->dst
.type
= OP_NONE
; /* nothing to writeback */
2746 case 0x70 ... 0x7f: /* jcc (short) */
2747 if (test_cc(c
->b
, ctxt
->eflags
))
2748 jmp_rel(c
, c
->src
.val
);
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	      test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	      xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
			c->dst.type = OP_NONE;  /* nop */
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = OP_REG;
		c->src.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa1:	/* mov */
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		c->dst.val = c->src.val;
		break;
	case 0xa2 ... 0xa3:	/* mov */
		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
		break;
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	      mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		c->src.val = 1;
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			emulate_gp(ctxt, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			emulate_gp(ctxt, 0);
		else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}
writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * Re-enter guest when pio read ahead buffer is empty or,
		 * if it is not used, after each 1024 iteration.
		 */
		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
		    (rc->end != 0 && rc->end == rc->pos))
			ctxt->restart = false;
	}

	/*
	 * reset read cache here in case string instruction is restarted
	 * without decoding
	 */
	ctxt->decode.mem_read.end = 0;
	ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
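
/*
 * Two-byte (0x0f-prefixed) opcodes are handled below; each case either
 * falls through to the common writeback path above or bails out through
 * cannot_emulate.
 */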
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	/* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	/* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	      btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
					       (s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
						  (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);