1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affilates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
26 #include <public/xen.h>
27 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
29 #include <linux/kvm_host.h>
30 #include "kvm_cache_regs.h"
31 #define DPRINTF(x...) do {} while (0)
33 #include <linux/module.h>
34 #include <asm/kvm_emulate.h>
40 * Opcode effective-address decode tables.
41 * Note that we only emulate instructions that have at least one memory
42 * operand (excluding implicit stack references). We assume that stack
43 * references and instruction fetches will never occur in special memory
44 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
48 /* Operand sizes: 8-bit operands or specified/overridden size. */
49 #define ByteOp (1<<0) /* 8-bit operands. */
50 /* Destination operand type. */
51 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
52 #define DstReg (2<<1) /* Register operand. */
53 #define DstMem (3<<1) /* Memory operand. */
54 #define DstAcc (4<<1) /* Destination Accumulator */
55 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
56 #define DstMem64 (6<<1) /* 64bit memory operand */
57 #define DstMask (7<<1)
58 /* Source operand type. */
59 #define SrcNone (0<<4) /* No source operand. */
60 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
61 #define SrcReg (1<<4) /* Register operand. */
62 #define SrcMem (2<<4) /* Memory operand. */
63 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
64 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
65 #define SrcImm (5<<4) /* Immediate operand. */
66 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
67 #define SrcOne (7<<4) /* Implied '1' */
68 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
69 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
70 #define SrcSI (0xa<<4) /* Source is in the DS:RSI */
71 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
72 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
73 #define SrcAcc (0xd<<4) /* Source Accumulator */
74 #define SrcMask (0xf<<4)
75 /* Generic ModRM decode. */
77 /* Destination is only written; never read. */
80 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
81 #define String (1<<12) /* String instruction (rep capable) */
82 #define Stack (1<<13) /* Stack instruction (push/pop) */
83 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
84 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
85 #define GroupMask 0xff /* Group number stored in bits 0:7 */
87 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
88 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
90 /* Source 2 operand type */
91 #define Src2None (0<<29)
92 #define Src2CL (1<<29)
93 #define Src2ImmByte (2<<29)
94 #define Src2One (3<<29)
95 #define Src2Mask (7<<29)
98 Group1_80
, Group1_81
, Group1_82
, Group1_83
,
99 Group1A
, Group3_Byte
, Group3
, Group4
, Group5
, Group7
,
103 static u32 opcode_table
[256] = {
105 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
106 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
107 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
108 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
110 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
111 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
112 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
113 ImplicitOps
| Stack
| No64
, 0,
115 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
116 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
117 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
118 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
120 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
121 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
122 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
123 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
125 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
126 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
127 ByteOp
| DstAcc
| SrcImmByte
, DstAcc
| SrcImm
, 0, 0,
129 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
130 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
131 ByteOp
| DstAcc
| SrcImmByte
, DstAcc
| SrcImm
, 0, 0,
133 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
134 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
135 ByteOp
| DstAcc
| SrcImmByte
, DstAcc
| SrcImm
, 0, 0,
137 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
138 ByteOp
| DstReg
| SrcMem
| ModRM
, DstReg
| SrcMem
| ModRM
,
139 ByteOp
| DstAcc
| SrcImm
, DstAcc
| SrcImm
,
142 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
144 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
146 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
147 SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
, SrcReg
| Stack
,
149 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
150 DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
, DstReg
| Stack
,
152 ImplicitOps
| Stack
| No64
, ImplicitOps
| Stack
| No64
,
153 0, DstReg
| SrcMem32
| ModRM
| Mov
/* movsxd (x86/64) */ ,
156 SrcImm
| Mov
| Stack
, 0, SrcImmByte
| Mov
| Stack
, 0,
157 DstDI
| ByteOp
| Mov
| String
, DstDI
| Mov
| String
, /* insb, insw/insd */
158 SrcSI
| ByteOp
| ImplicitOps
| String
, SrcSI
| ImplicitOps
| String
, /* outsb, outsw/outsd */
160 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
161 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
163 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
164 SrcImmByte
, SrcImmByte
, SrcImmByte
, SrcImmByte
,
166 Group
| Group1_80
, Group
| Group1_81
,
167 Group
| Group1_82
, Group
| Group1_83
,
168 ByteOp
| DstMem
| SrcReg
| ModRM
, DstMem
| SrcReg
| ModRM
,
169 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
171 ByteOp
| DstMem
| SrcReg
| ModRM
| Mov
, DstMem
| SrcReg
| ModRM
| Mov
,
172 ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
173 DstMem
| SrcNone
| ModRM
| Mov
, ModRM
| DstReg
,
174 ImplicitOps
| SrcMem16
| ModRM
, Group
| Group1A
,
176 DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
, DstReg
,
178 0, 0, SrcImmFAddr
| No64
, 0,
179 ImplicitOps
| Stack
, ImplicitOps
| Stack
, 0, 0,
181 ByteOp
| DstAcc
| SrcMem
| Mov
| MemAbs
, DstAcc
| SrcMem
| Mov
| MemAbs
,
182 ByteOp
| DstMem
| SrcAcc
| Mov
| MemAbs
, DstMem
| SrcAcc
| Mov
| MemAbs
,
183 ByteOp
| SrcSI
| DstDI
| Mov
| String
, SrcSI
| DstDI
| Mov
| String
,
184 ByteOp
| SrcSI
| DstDI
| String
, SrcSI
| DstDI
| String
,
186 DstAcc
| SrcImmByte
| ByteOp
, DstAcc
| SrcImm
, ByteOp
| DstDI
| Mov
| String
, DstDI
| Mov
| String
,
187 ByteOp
| SrcSI
| DstAcc
| Mov
| String
, SrcSI
| DstAcc
| Mov
| String
,
188 ByteOp
| DstDI
| String
, DstDI
| String
,
190 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
191 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
192 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
193 ByteOp
| DstReg
| SrcImm
| Mov
, ByteOp
| DstReg
| SrcImm
| Mov
,
195 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
196 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
197 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
198 DstReg
| SrcImm
| Mov
, DstReg
| SrcImm
| Mov
,
200 ByteOp
| DstMem
| SrcImm
| ModRM
, DstMem
| SrcImmByte
| ModRM
,
201 0, ImplicitOps
| Stack
, 0, 0,
202 ByteOp
| DstMem
| SrcImm
| ModRM
| Mov
, DstMem
| SrcImm
| ModRM
| Mov
,
204 0, 0, 0, ImplicitOps
| Stack
,
205 ImplicitOps
, SrcImmByte
, ImplicitOps
| No64
, ImplicitOps
,
207 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
208 ByteOp
| DstMem
| SrcImplicit
| ModRM
, DstMem
| SrcImplicit
| ModRM
,
211 0, 0, 0, 0, 0, 0, 0, 0,
214 ByteOp
| SrcImmUByte
| DstAcc
, SrcImmUByte
| DstAcc
,
215 ByteOp
| SrcImmUByte
| DstAcc
, SrcImmUByte
| DstAcc
,
217 SrcImm
| Stack
, SrcImm
| ImplicitOps
,
218 SrcImmFAddr
| No64
, SrcImmByte
| ImplicitOps
,
219 SrcNone
| ByteOp
| DstAcc
, SrcNone
| DstAcc
,
220 SrcNone
| ByteOp
| DstAcc
, SrcNone
| DstAcc
,
223 ImplicitOps
| Priv
, ImplicitOps
, Group
| Group3_Byte
, Group
| Group3
,
225 ImplicitOps
, 0, ImplicitOps
, ImplicitOps
,
226 ImplicitOps
, ImplicitOps
, Group
| Group4
, Group
| Group5
,
229 static u32 twobyte_table
[256] = {
231 0, Group
| GroupDual
| Group7
, 0, 0,
232 0, ImplicitOps
, ImplicitOps
| Priv
, 0,
233 ImplicitOps
| Priv
, ImplicitOps
| Priv
, 0, 0,
234 0, ImplicitOps
| ModRM
, 0, 0,
236 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps
| ModRM
, 0, 0, 0, 0, 0, 0, 0,
238 ModRM
| ImplicitOps
| Priv
, ModRM
| Priv
,
239 ModRM
| ImplicitOps
| Priv
, ModRM
| Priv
,
241 0, 0, 0, 0, 0, 0, 0, 0,
243 ImplicitOps
| Priv
, 0, ImplicitOps
| Priv
, 0,
244 ImplicitOps
, ImplicitOps
| Priv
, 0, 0,
245 0, 0, 0, 0, 0, 0, 0, 0,
247 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
248 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
249 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
250 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
252 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
253 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
254 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
255 DstReg
| SrcMem
| ModRM
| Mov
, DstReg
| SrcMem
| ModRM
| Mov
,
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
259 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
261 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
263 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
264 SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
, SrcImm
,
266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
268 ImplicitOps
| Stack
, ImplicitOps
| Stack
,
269 0, DstMem
| SrcReg
| ModRM
| BitOp
,
270 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
271 DstMem
| SrcReg
| Src2CL
| ModRM
, 0, 0,
273 ImplicitOps
| Stack
, ImplicitOps
| Stack
,
274 0, DstMem
| SrcReg
| ModRM
| BitOp
| Lock
,
275 DstMem
| SrcReg
| Src2ImmByte
| ModRM
,
276 DstMem
| SrcReg
| Src2CL
| ModRM
,
279 ByteOp
| DstMem
| SrcReg
| ModRM
| Lock
, DstMem
| SrcReg
| ModRM
| Lock
,
280 0, DstMem
| SrcReg
| ModRM
| BitOp
| Lock
,
281 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
282 DstReg
| SrcMem16
| ModRM
| Mov
,
285 Group
| Group8
, DstMem
| SrcReg
| ModRM
| BitOp
| Lock
,
286 0, 0, ByteOp
| DstReg
| SrcMem
| ModRM
| Mov
,
287 DstReg
| SrcMem16
| ModRM
| Mov
,
289 0, 0, 0, DstMem
| SrcReg
| ModRM
| Mov
,
290 0, 0, 0, Group
| GroupDual
| Group9
,
291 0, 0, 0, 0, 0, 0, 0, 0,
293 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
297 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
300 static u32 group_table
[] = {
302 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
303 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
304 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
305 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
306 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
307 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
308 ByteOp
| DstMem
| SrcImm
| ModRM
| Lock
,
309 ByteOp
| DstMem
| SrcImm
| ModRM
,
311 DstMem
| SrcImm
| ModRM
| Lock
,
312 DstMem
| SrcImm
| ModRM
| Lock
,
313 DstMem
| SrcImm
| ModRM
| Lock
,
314 DstMem
| SrcImm
| ModRM
| Lock
,
315 DstMem
| SrcImm
| ModRM
| Lock
,
316 DstMem
| SrcImm
| ModRM
| Lock
,
317 DstMem
| SrcImm
| ModRM
| Lock
,
318 DstMem
| SrcImm
| ModRM
,
320 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
321 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
322 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
323 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
324 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
325 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
326 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
| Lock
,
327 ByteOp
| DstMem
| SrcImm
| ModRM
| No64
,
329 DstMem
| SrcImmByte
| ModRM
| Lock
,
330 DstMem
| SrcImmByte
| ModRM
| Lock
,
331 DstMem
| SrcImmByte
| ModRM
| Lock
,
332 DstMem
| SrcImmByte
| ModRM
| Lock
,
333 DstMem
| SrcImmByte
| ModRM
| Lock
,
334 DstMem
| SrcImmByte
| ModRM
| Lock
,
335 DstMem
| SrcImmByte
| ModRM
| Lock
,
336 DstMem
| SrcImmByte
| ModRM
,
338 DstMem
| SrcNone
| ModRM
| Mov
| Stack
, 0, 0, 0, 0, 0, 0, 0,
340 ByteOp
| SrcImm
| DstMem
| ModRM
, ByteOp
| SrcImm
| DstMem
| ModRM
,
341 ByteOp
| DstMem
| SrcNone
| ModRM
, ByteOp
| DstMem
| SrcNone
| ModRM
,
344 DstMem
| SrcImm
| ModRM
, DstMem
| SrcImm
| ModRM
,
345 DstMem
| SrcNone
| ModRM
, DstMem
| SrcNone
| ModRM
,
348 ByteOp
| DstMem
| SrcNone
| ModRM
| Lock
, ByteOp
| DstMem
| SrcNone
| ModRM
| Lock
,
351 DstMem
| SrcNone
| ModRM
| Lock
, DstMem
| SrcNone
| ModRM
| Lock
,
352 SrcMem
| ModRM
| Stack
, 0,
353 SrcMem
| ModRM
| Stack
, SrcMemFAddr
| ModRM
| ImplicitOps
,
354 SrcMem
| ModRM
| Stack
, 0,
356 0, 0, ModRM
| SrcMem
| Priv
, ModRM
| SrcMem
| Priv
,
357 SrcNone
| ModRM
| DstMem
| Mov
, 0,
358 SrcMem16
| ModRM
| Mov
| Priv
, SrcMem
| ModRM
| ByteOp
| Priv
,
361 DstMem
| SrcImmByte
| ModRM
, DstMem
| SrcImmByte
| ModRM
| Lock
,
362 DstMem
| SrcImmByte
| ModRM
| Lock
, DstMem
| SrcImmByte
| ModRM
| Lock
,
364 0, DstMem64
| ModRM
| Lock
, 0, 0, 0, 0, 0, 0,
367 static u32 group2_table
[] = {
369 SrcNone
| ModRM
| Priv
, 0, 0, SrcNone
| ModRM
| Priv
,
370 SrcNone
| ModRM
| DstMem
| Mov
, 0,
371 SrcMem16
| ModRM
| Mov
| Priv
, 0,
373 0, 0, 0, 0, 0, 0, 0, 0,
376 /* EFLAGS bit definitions. */
377 #define EFLG_ID (1<<21)
378 #define EFLG_VIP (1<<20)
379 #define EFLG_VIF (1<<19)
380 #define EFLG_AC (1<<18)
381 #define EFLG_VM (1<<17)
382 #define EFLG_RF (1<<16)
383 #define EFLG_IOPL (3<<12)
384 #define EFLG_NT (1<<14)
385 #define EFLG_OF (1<<11)
386 #define EFLG_DF (1<<10)
387 #define EFLG_IF (1<<9)
388 #define EFLG_TF (1<<8)
389 #define EFLG_SF (1<<7)
390 #define EFLG_ZF (1<<6)
391 #define EFLG_AF (1<<4)
392 #define EFLG_PF (1<<2)
393 #define EFLG_CF (1<<0)
396 * Instruction emulation:
397 * Most instructions are emulated directly via a fragment of inline assembly
398 * code. This allows us to save/restore EFLAGS and thus very easily pick up
399 * any modified flags.
402 #if defined(CONFIG_X86_64)
403 #define _LO32 "k" /* force 32-bit operand */
404 #define _STK "%%rsp" /* stack pointer */
405 #elif defined(__i386__)
406 #define _LO32 "" /* force 32-bit operand */
407 #define _STK "%%esp" /* stack pointer */
411 * These EFLAGS bits are restored from saved value during emulation, and
412 * any changes are written back to the saved value after emulation.
414 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
416 /* Before executing instruction: restore necessary bits in EFLAGS. */
417 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
418 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
419 "movl %"_sav",%"_LO32 _tmp"; " \
422 "movl %"_msk",%"_LO32 _tmp"; " \
423 "andl %"_LO32 _tmp",("_STK"); " \
425 "notl %"_LO32 _tmp"; " \
426 "andl %"_LO32 _tmp",("_STK"); " \
427 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
429 "orl %"_LO32 _tmp",("_STK"); " \
433 /* After executing instruction: write-back necessary bits in EFLAGS. */
434 #define _POST_EFLAGS(_sav, _msk, _tmp) \
435 /* _sav |= EFLAGS & _msk; */ \
438 "andl %"_msk",%"_LO32 _tmp"; " \
439 "orl %"_LO32 _tmp",%"_sav"; "
447 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
449 __asm__ __volatile__ ( \
450 _PRE_EFLAGS("0", "4", "2") \
451 _op _suffix " %"_x"3,%1; " \
452 _POST_EFLAGS("0", "4", "2") \
453 : "=m" (_eflags), "=m" ((_dst).val), \
455 : _y ((_src).val), "i" (EFLAGS_MASK)); \
459 /* Raw emulation: instruction has two explicit operands. */
460 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
462 unsigned long _tmp; \
464 switch ((_dst).bytes) { \
466 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
469 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
472 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
477 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
479 unsigned long _tmp; \
480 switch ((_dst).bytes) { \
482 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
485 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
486 _wx, _wy, _lx, _ly, _qx, _qy); \
491 /* Source operand is byte-sized and may be restricted to just %cl. */
492 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
493 __emulate_2op(_op, _src, _dst, _eflags, \
494 "b", "c", "b", "c", "b", "c", "b", "c")
496 /* Source operand is byte, word, long or quad sized. */
497 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
498 __emulate_2op(_op, _src, _dst, _eflags, \
499 "b", "q", "w", "r", _LO32, "r", "", "r")
501 /* Source operand is word, long or quad sized. */
502 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
503 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
504 "w", "r", _LO32, "r", "", "r")
506 /* Instruction has three operands and one operand is stored in ECX register */
507 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
509 unsigned long _tmp; \
510 _type _clv = (_cl).val; \
511 _type _srcv = (_src).val; \
512 _type _dstv = (_dst).val; \
514 __asm__ __volatile__ ( \
515 _PRE_EFLAGS("0", "5", "2") \
516 _op _suffix " %4,%1 \n" \
517 _POST_EFLAGS("0", "5", "2") \
518 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
519 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
522 (_cl).val = (unsigned long) _clv; \
523 (_src).val = (unsigned long) _srcv; \
524 (_dst).val = (unsigned long) _dstv; \
527 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
529 switch ((_dst).bytes) { \
531 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
532 "w", unsigned short); \
535 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
536 "l", unsigned int); \
539 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
540 "q", unsigned long)); \
545 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
547 unsigned long _tmp; \
549 __asm__ __volatile__ ( \
550 _PRE_EFLAGS("0", "3", "2") \
551 _op _suffix " %1; " \
552 _POST_EFLAGS("0", "3", "2") \
553 : "=m" (_eflags), "+m" ((_dst).val), \
555 : "i" (EFLAGS_MASK)); \
558 /* Instruction has only one explicit operand (no source operand). */
559 #define emulate_1op(_op, _dst, _eflags) \
561 switch ((_dst).bytes) { \
562 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
563 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
564 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
565 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
569 /* Fetch next part of the instruction being emulated. */
570 #define insn_fetch(_type, _size, _eip) \
571 ({ unsigned long _x; \
572 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
573 if (rc != X86EMUL_CONTINUE) \
579 #define insn_fetch_arr(_arr, _size, _eip) \
580 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
581 if (rc != X86EMUL_CONTINUE) \
586 static inline unsigned long ad_mask(struct decode_cache
*c
)
588 return (1UL << (c
->ad_bytes
<< 3)) - 1;
591 /* Access/update address held in a register, based on addressing mode. */
592 static inline unsigned long
593 address_mask(struct decode_cache
*c
, unsigned long reg
)
595 if (c
->ad_bytes
== sizeof(unsigned long))
598 return reg
& ad_mask(c
);
601 static inline unsigned long
602 register_address(struct decode_cache
*c
, unsigned long base
, unsigned long reg
)
604 return base
+ address_mask(c
, reg
);
608 register_address_increment(struct decode_cache
*c
, unsigned long *reg
, int inc
)
610 if (c
->ad_bytes
== sizeof(unsigned long))
613 *reg
= (*reg
& ~ad_mask(c
)) | ((*reg
+ inc
) & ad_mask(c
));
616 static inline void jmp_rel(struct decode_cache
*c
, int rel
)
618 register_address_increment(c
, &c
->eip
, rel
);
621 static void set_seg_override(struct decode_cache
*c
, int seg
)
623 c
->has_seg_override
= true;
624 c
->seg_override
= seg
;
627 static unsigned long seg_base(struct x86_emulate_ctxt
*ctxt
,
628 struct x86_emulate_ops
*ops
, int seg
)
630 if (ctxt
->mode
== X86EMUL_MODE_PROT64
&& seg
< VCPU_SREG_FS
)
633 return ops
->get_cached_segment_base(seg
, ctxt
->vcpu
);
636 static unsigned long seg_override_base(struct x86_emulate_ctxt
*ctxt
,
637 struct x86_emulate_ops
*ops
,
638 struct decode_cache
*c
)
640 if (!c
->has_seg_override
)
643 return seg_base(ctxt
, ops
, c
->seg_override
);
646 static unsigned long es_base(struct x86_emulate_ctxt
*ctxt
,
647 struct x86_emulate_ops
*ops
)
649 return seg_base(ctxt
, ops
, VCPU_SREG_ES
);
652 static unsigned long ss_base(struct x86_emulate_ctxt
*ctxt
,
653 struct x86_emulate_ops
*ops
)
655 return seg_base(ctxt
, ops
, VCPU_SREG_SS
);
658 static void emulate_exception(struct x86_emulate_ctxt
*ctxt
, int vec
,
659 u32 error
, bool valid
)
661 ctxt
->exception
= vec
;
662 ctxt
->error_code
= error
;
663 ctxt
->error_code_valid
= valid
;
664 ctxt
->restart
= false;
667 static void emulate_gp(struct x86_emulate_ctxt
*ctxt
, int err
)
669 emulate_exception(ctxt
, GP_VECTOR
, err
, true);
672 static void emulate_pf(struct x86_emulate_ctxt
*ctxt
, unsigned long addr
,
676 emulate_exception(ctxt
, PF_VECTOR
, err
, true);
679 static void emulate_ud(struct x86_emulate_ctxt
*ctxt
)
681 emulate_exception(ctxt
, UD_VECTOR
, 0, false);
684 static void emulate_ts(struct x86_emulate_ctxt
*ctxt
, int err
)
686 emulate_exception(ctxt
, TS_VECTOR
, err
, true);
689 static int do_fetch_insn_byte(struct x86_emulate_ctxt
*ctxt
,
690 struct x86_emulate_ops
*ops
,
691 unsigned long eip
, u8
*dest
)
693 struct fetch_cache
*fc
= &ctxt
->decode
.fetch
;
697 if (eip
== fc
->end
) {
698 cur_size
= fc
->end
- fc
->start
;
699 size
= min(15UL - cur_size
, PAGE_SIZE
- offset_in_page(eip
));
700 rc
= ops
->fetch(ctxt
->cs_base
+ eip
, fc
->data
+ cur_size
,
701 size
, ctxt
->vcpu
, NULL
);
702 if (rc
!= X86EMUL_CONTINUE
)
706 *dest
= fc
->data
[eip
- fc
->start
];
707 return X86EMUL_CONTINUE
;
710 static int do_insn_fetch(struct x86_emulate_ctxt
*ctxt
,
711 struct x86_emulate_ops
*ops
,
712 unsigned long eip
, void *dest
, unsigned size
)
716 /* x86 instructions are limited to 15 bytes. */
717 if (eip
+ size
- ctxt
->eip
> 15)
718 return X86EMUL_UNHANDLEABLE
;
720 rc
= do_fetch_insn_byte(ctxt
, ops
, eip
++, dest
++);
721 if (rc
!= X86EMUL_CONTINUE
)
724 return X86EMUL_CONTINUE
;
728 * Given the 'reg' portion of a ModRM byte, and a register block, return a
729 * pointer into the block that addresses the relevant register.
730 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
732 static void *decode_register(u8 modrm_reg
, unsigned long *regs
,
737 p
= ®s
[modrm_reg
];
738 if (highbyte_regs
&& modrm_reg
>= 4 && modrm_reg
< 8)
739 p
= (unsigned char *)®s
[modrm_reg
& 3] + 1;
743 static int read_descriptor(struct x86_emulate_ctxt
*ctxt
,
744 struct x86_emulate_ops
*ops
,
746 u16
*size
, unsigned long *address
, int op_bytes
)
753 rc
= ops
->read_std((unsigned long)ptr
, (unsigned long *)size
, 2,
755 if (rc
!= X86EMUL_CONTINUE
)
757 rc
= ops
->read_std((unsigned long)ptr
+ 2, address
, op_bytes
,
762 static int test_cc(unsigned int condition
, unsigned int flags
)
766 switch ((condition
& 15) >> 1) {
768 rc
|= (flags
& EFLG_OF
);
770 case 1: /* b/c/nae */
771 rc
|= (flags
& EFLG_CF
);
774 rc
|= (flags
& EFLG_ZF
);
777 rc
|= (flags
& (EFLG_CF
|EFLG_ZF
));
780 rc
|= (flags
& EFLG_SF
);
783 rc
|= (flags
& EFLG_PF
);
786 rc
|= (flags
& EFLG_ZF
);
789 rc
|= (!(flags
& EFLG_SF
) != !(flags
& EFLG_OF
));
793 /* Odd condition identifiers (lsb == 1) have inverted sense. */
794 return (!!rc
^ (condition
& 1));
797 static void decode_register_operand(struct operand
*op
,
798 struct decode_cache
*c
,
801 unsigned reg
= c
->modrm_reg
;
802 int highbyte_regs
= c
->rex_prefix
== 0;
805 reg
= (c
->b
& 7) | ((c
->rex_prefix
& 1) << 3);
807 if ((c
->d
& ByteOp
) && !inhibit_bytereg
) {
808 op
->ptr
= decode_register(reg
, c
->regs
, highbyte_regs
);
809 op
->val
= *(u8
*)op
->ptr
;
812 op
->ptr
= decode_register(reg
, c
->regs
, 0);
813 op
->bytes
= c
->op_bytes
;
816 op
->val
= *(u16
*)op
->ptr
;
819 op
->val
= *(u32
*)op
->ptr
;
822 op
->val
= *(u64
*) op
->ptr
;
826 op
->orig_val
= op
->val
;
829 static int decode_modrm(struct x86_emulate_ctxt
*ctxt
,
830 struct x86_emulate_ops
*ops
)
832 struct decode_cache
*c
= &ctxt
->decode
;
834 int index_reg
= 0, base_reg
= 0, scale
;
835 int rc
= X86EMUL_CONTINUE
;
838 c
->modrm_reg
= (c
->rex_prefix
& 4) << 1; /* REX.R */
839 index_reg
= (c
->rex_prefix
& 2) << 2; /* REX.X */
840 c
->modrm_rm
= base_reg
= (c
->rex_prefix
& 1) << 3; /* REG.B */
843 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
844 c
->modrm_mod
|= (c
->modrm
& 0xc0) >> 6;
845 c
->modrm_reg
|= (c
->modrm
& 0x38) >> 3;
846 c
->modrm_rm
|= (c
->modrm
& 0x07);
850 if (c
->modrm_mod
== 3) {
851 c
->modrm_ptr
= decode_register(c
->modrm_rm
,
852 c
->regs
, c
->d
& ByteOp
);
853 c
->modrm_val
= *(unsigned long *)c
->modrm_ptr
;
857 if (c
->ad_bytes
== 2) {
858 unsigned bx
= c
->regs
[VCPU_REGS_RBX
];
859 unsigned bp
= c
->regs
[VCPU_REGS_RBP
];
860 unsigned si
= c
->regs
[VCPU_REGS_RSI
];
861 unsigned di
= c
->regs
[VCPU_REGS_RDI
];
863 /* 16-bit ModR/M decode. */
864 switch (c
->modrm_mod
) {
866 if (c
->modrm_rm
== 6)
867 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
870 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
873 c
->modrm_ea
+= insn_fetch(u16
, 2, c
->eip
);
876 switch (c
->modrm_rm
) {
878 c
->modrm_ea
+= bx
+ si
;
881 c
->modrm_ea
+= bx
+ di
;
884 c
->modrm_ea
+= bp
+ si
;
887 c
->modrm_ea
+= bp
+ di
;
896 if (c
->modrm_mod
!= 0)
903 if (c
->modrm_rm
== 2 || c
->modrm_rm
== 3 ||
904 (c
->modrm_rm
== 6 && c
->modrm_mod
!= 0))
905 if (!c
->has_seg_override
)
906 set_seg_override(c
, VCPU_SREG_SS
);
907 c
->modrm_ea
= (u16
)c
->modrm_ea
;
909 /* 32/64-bit ModR/M decode. */
910 if ((c
->modrm_rm
& 7) == 4) {
911 sib
= insn_fetch(u8
, 1, c
->eip
);
912 index_reg
|= (sib
>> 3) & 7;
916 if ((base_reg
& 7) == 5 && c
->modrm_mod
== 0)
917 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
919 c
->modrm_ea
+= c
->regs
[base_reg
];
921 c
->modrm_ea
+= c
->regs
[index_reg
] << scale
;
922 } else if ((c
->modrm_rm
& 7) == 5 && c
->modrm_mod
== 0) {
923 if (ctxt
->mode
== X86EMUL_MODE_PROT64
)
926 c
->modrm_ea
+= c
->regs
[c
->modrm_rm
];
927 switch (c
->modrm_mod
) {
929 if (c
->modrm_rm
== 5)
930 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
933 c
->modrm_ea
+= insn_fetch(s8
, 1, c
->eip
);
936 c
->modrm_ea
+= insn_fetch(s32
, 4, c
->eip
);
944 static int decode_abs(struct x86_emulate_ctxt
*ctxt
,
945 struct x86_emulate_ops
*ops
)
947 struct decode_cache
*c
= &ctxt
->decode
;
948 int rc
= X86EMUL_CONTINUE
;
950 switch (c
->ad_bytes
) {
952 c
->modrm_ea
= insn_fetch(u16
, 2, c
->eip
);
955 c
->modrm_ea
= insn_fetch(u32
, 4, c
->eip
);
958 c
->modrm_ea
= insn_fetch(u64
, 8, c
->eip
);
966 x86_decode_insn(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
968 struct decode_cache
*c
= &ctxt
->decode
;
969 int rc
= X86EMUL_CONTINUE
;
970 int mode
= ctxt
->mode
;
971 int def_op_bytes
, def_ad_bytes
, group
;
974 /* we cannot decode insn before we complete previous rep insn */
975 WARN_ON(ctxt
->restart
);
978 c
->fetch
.start
= c
->fetch
.end
= c
->eip
;
979 ctxt
->cs_base
= seg_base(ctxt
, ops
, VCPU_SREG_CS
);
982 case X86EMUL_MODE_REAL
:
983 case X86EMUL_MODE_VM86
:
984 case X86EMUL_MODE_PROT16
:
985 def_op_bytes
= def_ad_bytes
= 2;
987 case X86EMUL_MODE_PROT32
:
988 def_op_bytes
= def_ad_bytes
= 4;
991 case X86EMUL_MODE_PROT64
:
1000 c
->op_bytes
= def_op_bytes
;
1001 c
->ad_bytes
= def_ad_bytes
;
1003 /* Legacy prefixes. */
1005 switch (c
->b
= insn_fetch(u8
, 1, c
->eip
)) {
1006 case 0x66: /* operand-size override */
1007 /* switch between 2/4 bytes */
1008 c
->op_bytes
= def_op_bytes
^ 6;
1010 case 0x67: /* address-size override */
1011 if (mode
== X86EMUL_MODE_PROT64
)
1012 /* switch between 4/8 bytes */
1013 c
->ad_bytes
= def_ad_bytes
^ 12;
1015 /* switch between 2/4 bytes */
1016 c
->ad_bytes
= def_ad_bytes
^ 6;
1018 case 0x26: /* ES override */
1019 case 0x2e: /* CS override */
1020 case 0x36: /* SS override */
1021 case 0x3e: /* DS override */
1022 set_seg_override(c
, (c
->b
>> 3) & 3);
1024 case 0x64: /* FS override */
1025 case 0x65: /* GS override */
1026 set_seg_override(c
, c
->b
& 7);
1028 case 0x40 ... 0x4f: /* REX */
1029 if (mode
!= X86EMUL_MODE_PROT64
)
1031 c
->rex_prefix
= c
->b
;
1033 case 0xf0: /* LOCK */
1036 case 0xf2: /* REPNE/REPNZ */
1037 c
->rep_prefix
= REPNE_PREFIX
;
1039 case 0xf3: /* REP/REPE/REPZ */
1040 c
->rep_prefix
= REPE_PREFIX
;
1046 /* Any legacy prefix after a REX prefix nullifies its effect. */
1055 if (c
->rex_prefix
& 8)
1056 c
->op_bytes
= 8; /* REX.W */
1058 /* Opcode byte(s). */
1059 c
->d
= opcode_table
[c
->b
];
1061 /* Two-byte opcode? */
1064 c
->b
= insn_fetch(u8
, 1, c
->eip
);
1065 c
->d
= twobyte_table
[c
->b
];
1070 group
= c
->d
& GroupMask
;
1071 c
->modrm
= insn_fetch(u8
, 1, c
->eip
);
1074 group
= (group
<< 3) + ((c
->modrm
>> 3) & 7);
1075 if ((c
->d
& GroupDual
) && (c
->modrm
>> 6) == 3)
1076 c
->d
= group2_table
[group
];
1078 c
->d
= group_table
[group
];
1083 DPRINTF("Cannot emulate %02x\n", c
->b
);
1087 if (mode
== X86EMUL_MODE_PROT64
&& (c
->d
& Stack
))
1090 /* ModRM and SIB bytes. */
1092 rc
= decode_modrm(ctxt
, ops
);
1093 else if (c
->d
& MemAbs
)
1094 rc
= decode_abs(ctxt
, ops
);
1095 if (rc
!= X86EMUL_CONTINUE
)
1098 if (!c
->has_seg_override
)
1099 set_seg_override(c
, VCPU_SREG_DS
);
1101 if (!(!c
->twobyte
&& c
->b
== 0x8d))
1102 c
->modrm_ea
+= seg_override_base(ctxt
, ops
, c
);
1104 if (c
->ad_bytes
!= 8)
1105 c
->modrm_ea
= (u32
)c
->modrm_ea
;
1107 if (c
->rip_relative
)
1108 c
->modrm_ea
+= c
->eip
;
1111 * Decode and fetch the source operand: register, memory
1114 switch (c
->d
& SrcMask
) {
1118 decode_register_operand(&c
->src
, c
, 0);
1127 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 :
1129 /* Don't fetch the address for invlpg: it could be unmapped. */
1130 if (c
->twobyte
&& c
->b
== 0x01 && c
->modrm_reg
== 7)
1134 * For instructions with a ModR/M byte, switch to register
1135 * access if Mod = 3.
1137 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1138 c
->src
.type
= OP_REG
;
1139 c
->src
.val
= c
->modrm_val
;
1140 c
->src
.ptr
= c
->modrm_ptr
;
1143 c
->src
.type
= OP_MEM
;
1144 c
->src
.ptr
= (unsigned long *)c
->modrm_ea
;
1149 c
->src
.type
= OP_IMM
;
1150 c
->src
.ptr
= (unsigned long *)c
->eip
;
1151 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1152 if (c
->src
.bytes
== 8)
1154 /* NB. Immediates are sign-extended as necessary. */
1155 switch (c
->src
.bytes
) {
1157 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1160 c
->src
.val
= insn_fetch(s16
, 2, c
->eip
);
1163 c
->src
.val
= insn_fetch(s32
, 4, c
->eip
);
1166 if ((c
->d
& SrcMask
) == SrcImmU
) {
1167 switch (c
->src
.bytes
) {
1172 c
->src
.val
&= 0xffff;
1175 c
->src
.val
&= 0xffffffff;
1182 c
->src
.type
= OP_IMM
;
1183 c
->src
.ptr
= (unsigned long *)c
->eip
;
1185 if ((c
->d
& SrcMask
) == SrcImmByte
)
1186 c
->src
.val
= insn_fetch(s8
, 1, c
->eip
);
1188 c
->src
.val
= insn_fetch(u8
, 1, c
->eip
);
1191 c
->src
.type
= OP_REG
;
1192 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1193 c
->src
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1194 switch (c
->src
.bytes
) {
1196 c
->src
.val
= *(u8
*)c
->src
.ptr
;
1199 c
->src
.val
= *(u16
*)c
->src
.ptr
;
1202 c
->src
.val
= *(u32
*)c
->src
.ptr
;
1205 c
->src
.val
= *(u64
*)c
->src
.ptr
;
1214 c
->src
.type
= OP_MEM
;
1215 c
->src
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1216 c
->src
.ptr
= (unsigned long *)
1217 register_address(c
, seg_override_base(ctxt
, ops
, c
),
1218 c
->regs
[VCPU_REGS_RSI
]);
1222 c
->src
.type
= OP_IMM
;
1223 c
->src
.ptr
= (unsigned long *)c
->eip
;
1224 c
->src
.bytes
= c
->op_bytes
+ 2;
1225 insn_fetch_arr(c
->src
.valptr
, c
->src
.bytes
, c
->eip
);
1228 c
->src
.type
= OP_MEM
;
1229 c
->src
.ptr
= (unsigned long *)c
->modrm_ea
;
1230 c
->src
.bytes
= c
->op_bytes
+ 2;
1235 * Decode and fetch the second source operand: register, memory
1238 switch (c
->d
& Src2Mask
) {
1243 c
->src2
.val
= c
->regs
[VCPU_REGS_RCX
] & 0x8;
1246 c
->src2
.type
= OP_IMM
;
1247 c
->src2
.ptr
= (unsigned long *)c
->eip
;
1249 c
->src2
.val
= insn_fetch(u8
, 1, c
->eip
);
1257 /* Decode and fetch the destination operand: register or memory. */
1258 switch (c
->d
& DstMask
) {
1260 /* Special instructions do their own operand decoding. */
1263 decode_register_operand(&c
->dst
, c
,
1264 c
->twobyte
&& (c
->b
== 0xb6 || c
->b
== 0xb7));
1268 if ((c
->d
& ModRM
) && c
->modrm_mod
== 3) {
1269 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1270 c
->dst
.type
= OP_REG
;
1271 c
->dst
.val
= c
->dst
.orig_val
= c
->modrm_val
;
1272 c
->dst
.ptr
= c
->modrm_ptr
;
1275 c
->dst
.type
= OP_MEM
;
1276 c
->dst
.ptr
= (unsigned long *)c
->modrm_ea
;
1277 if ((c
->d
& DstMask
) == DstMem64
)
1280 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1283 unsigned long mask
= ~(c
->dst
.bytes
* 8 - 1);
1285 c
->dst
.ptr
= (void *)c
->dst
.ptr
+
1286 (c
->src
.val
& mask
) / 8;
1290 c
->dst
.type
= OP_REG
;
1291 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1292 c
->dst
.ptr
= &c
->regs
[VCPU_REGS_RAX
];
1293 switch (c
->dst
.bytes
) {
1295 c
->dst
.val
= *(u8
*)c
->dst
.ptr
;
1298 c
->dst
.val
= *(u16
*)c
->dst
.ptr
;
1301 c
->dst
.val
= *(u32
*)c
->dst
.ptr
;
1304 c
->dst
.val
= *(u64
*)c
->dst
.ptr
;
1307 c
->dst
.orig_val
= c
->dst
.val
;
1310 c
->dst
.type
= OP_MEM
;
1311 c
->dst
.bytes
= (c
->d
& ByteOp
) ? 1 : c
->op_bytes
;
1312 c
->dst
.ptr
= (unsigned long *)
1313 register_address(c
, es_base(ctxt
, ops
),
1314 c
->regs
[VCPU_REGS_RDI
]);
1320 return (rc
== X86EMUL_UNHANDLEABLE
) ? -1 : 0;
1323 static int read_emulated(struct x86_emulate_ctxt
*ctxt
,
1324 struct x86_emulate_ops
*ops
,
1325 unsigned long addr
, void *dest
, unsigned size
)
1328 struct read_cache
*mc
= &ctxt
->decode
.mem_read
;
1332 int n
= min(size
, 8u);
1334 if (mc
->pos
< mc
->end
)
1337 rc
= ops
->read_emulated(addr
, mc
->data
+ mc
->end
, n
, &err
,
1339 if (rc
== X86EMUL_PROPAGATE_FAULT
)
1340 emulate_pf(ctxt
, addr
, err
);
1341 if (rc
!= X86EMUL_CONTINUE
)
1346 memcpy(dest
, mc
->data
+ mc
->pos
, n
);
1351 return X86EMUL_CONTINUE
;
1354 static int pio_in_emulated(struct x86_emulate_ctxt
*ctxt
,
1355 struct x86_emulate_ops
*ops
,
1356 unsigned int size
, unsigned short port
,
1359 struct read_cache
*rc
= &ctxt
->decode
.io_read
;
1361 if (rc
->pos
== rc
->end
) { /* refill pio read ahead */
1362 struct decode_cache
*c
= &ctxt
->decode
;
1363 unsigned int in_page
, n
;
1364 unsigned int count
= c
->rep_prefix
?
1365 address_mask(c
, c
->regs
[VCPU_REGS_RCX
]) : 1;
1366 in_page
= (ctxt
->eflags
& EFLG_DF
) ?
1367 offset_in_page(c
->regs
[VCPU_REGS_RDI
]) :
1368 PAGE_SIZE
- offset_in_page(c
->regs
[VCPU_REGS_RDI
]);
1369 n
= min(min(in_page
, (unsigned int)sizeof(rc
->data
)) / size
,
1373 rc
->pos
= rc
->end
= 0;
1374 if (!ops
->pio_in_emulated(size
, port
, rc
->data
, n
, ctxt
->vcpu
))
1379 memcpy(dest
, rc
->data
+ rc
->pos
, size
);
1384 static u32
desc_limit_scaled(struct desc_struct
*desc
)
1386 u32 limit
= get_desc_limit(desc
);
1388 return desc
->g
? (limit
<< 12) | 0xfff : limit
;
1391 static void get_descriptor_table_ptr(struct x86_emulate_ctxt
*ctxt
,
1392 struct x86_emulate_ops
*ops
,
1393 u16 selector
, struct desc_ptr
*dt
)
1395 if (selector
& 1 << 2) {
1396 struct desc_struct desc
;
1397 memset (dt
, 0, sizeof *dt
);
1398 if (!ops
->get_cached_descriptor(&desc
, VCPU_SREG_LDTR
, ctxt
->vcpu
))
1401 dt
->size
= desc_limit_scaled(&desc
); /* what if limit > 65535? */
1402 dt
->address
= get_desc_base(&desc
);
1404 ops
->get_gdt(dt
, ctxt
->vcpu
);
1407 /* allowed just for 8 bytes segments */
1408 static int read_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1409 struct x86_emulate_ops
*ops
,
1410 u16 selector
, struct desc_struct
*desc
)
1413 u16 index
= selector
>> 3;
1418 get_descriptor_table_ptr(ctxt
, ops
, selector
, &dt
);
1420 if (dt
.size
< index
* 8 + 7) {
1421 emulate_gp(ctxt
, selector
& 0xfffc);
1422 return X86EMUL_PROPAGATE_FAULT
;
1424 addr
= dt
.address
+ index
* 8;
1425 ret
= ops
->read_std(addr
, desc
, sizeof *desc
, ctxt
->vcpu
, &err
);
1426 if (ret
== X86EMUL_PROPAGATE_FAULT
)
1427 emulate_pf(ctxt
, addr
, err
);
1432 /* allowed just for 8 bytes segments */
1433 static int write_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1434 struct x86_emulate_ops
*ops
,
1435 u16 selector
, struct desc_struct
*desc
)
1438 u16 index
= selector
>> 3;
1443 get_descriptor_table_ptr(ctxt
, ops
, selector
, &dt
);
1445 if (dt
.size
< index
* 8 + 7) {
1446 emulate_gp(ctxt
, selector
& 0xfffc);
1447 return X86EMUL_PROPAGATE_FAULT
;
1450 addr
= dt
.address
+ index
* 8;
1451 ret
= ops
->write_std(addr
, desc
, sizeof *desc
, ctxt
->vcpu
, &err
);
1452 if (ret
== X86EMUL_PROPAGATE_FAULT
)
1453 emulate_pf(ctxt
, addr
, err
);
1458 static int load_segment_descriptor(struct x86_emulate_ctxt
*ctxt
,
1459 struct x86_emulate_ops
*ops
,
1460 u16 selector
, int seg
)
1462 struct desc_struct seg_desc
;
1464 unsigned err_vec
= GP_VECTOR
;
1466 bool null_selector
= !(selector
& ~0x3); /* 0000-0003 are null */
1469 memset(&seg_desc
, 0, sizeof seg_desc
);
1471 if ((seg
<= VCPU_SREG_GS
&& ctxt
->mode
== X86EMUL_MODE_VM86
)
1472 || ctxt
->mode
== X86EMUL_MODE_REAL
) {
1473 /* set real mode segment descriptor */
1474 set_desc_base(&seg_desc
, selector
<< 4);
1475 set_desc_limit(&seg_desc
, 0xffff);
1482 /* NULL selector is not valid for TR, CS and SS */
1483 if ((seg
== VCPU_SREG_CS
|| seg
== VCPU_SREG_SS
|| seg
== VCPU_SREG_TR
)
1487 /* TR should be in GDT only */
1488 if (seg
== VCPU_SREG_TR
&& (selector
& (1 << 2)))
1491 if (null_selector
) /* for NULL selector skip all following checks */
1494 ret
= read_segment_descriptor(ctxt
, ops
, selector
, &seg_desc
);
1495 if (ret
!= X86EMUL_CONTINUE
)
1498 err_code
= selector
& 0xfffc;
1499 err_vec
= GP_VECTOR
;
1501 /* can't load system descriptor into segment selecor */
1502 if (seg
<= VCPU_SREG_GS
&& !seg_desc
.s
)
1506 err_vec
= (seg
== VCPU_SREG_SS
) ? SS_VECTOR
: NP_VECTOR
;
1512 cpl
= ops
->cpl(ctxt
->vcpu
);
1517 * segment is not a writable data segment or segment
1518 * selector's RPL != CPL or segment selector's RPL != CPL
1520 if (rpl
!= cpl
|| (seg_desc
.type
& 0xa) != 0x2 || dpl
!= cpl
)
1524 if (!(seg_desc
.type
& 8))
1527 if (seg_desc
.type
& 4) {
1533 if (rpl
> cpl
|| dpl
!= cpl
)
1536 /* CS(RPL) <- CPL */
1537 selector
= (selector
& 0xfffc) | cpl
;
1540 if (seg_desc
.s
|| (seg_desc
.type
!= 1 && seg_desc
.type
!= 9))
1543 case VCPU_SREG_LDTR
:
1544 if (seg_desc
.s
|| seg_desc
.type
!= 2)
1547 default: /* DS, ES, FS, or GS */
1549 * segment is not a data or readable code segment or
1550 * ((segment is a data or nonconforming code segment)
1551 * and (both RPL and CPL > DPL))
1553 if ((seg_desc
.type
& 0xa) == 0x8 ||
1554 (((seg_desc
.type
& 0xc) != 0xc) &&
1555 (rpl
> dpl
&& cpl
> dpl
)))
1561 /* mark segment as accessed */
1563 ret
= write_segment_descriptor(ctxt
, ops
, selector
, &seg_desc
);
1564 if (ret
!= X86EMUL_CONTINUE
)
1568 ops
->set_segment_selector(selector
, seg
, ctxt
->vcpu
);
1569 ops
->set_cached_descriptor(&seg_desc
, seg
, ctxt
->vcpu
);
1570 return X86EMUL_CONTINUE
;
1572 emulate_exception(ctxt
, err_vec
, err_code
, true);
1573 return X86EMUL_PROPAGATE_FAULT
;
1576 static inline int writeback(struct x86_emulate_ctxt
*ctxt
,
1577 struct x86_emulate_ops
*ops
)
1580 struct decode_cache
*c
= &ctxt
->decode
;
1583 switch (c
->dst
.type
) {
1585 /* The 4-byte case *is* correct:
1586 * in 64-bit mode we zero-extend.
1588 switch (c
->dst
.bytes
) {
1590 *(u8
*)c
->dst
.ptr
= (u8
)c
->dst
.val
;
1593 *(u16
*)c
->dst
.ptr
= (u16
)c
->dst
.val
;
1596 *c
->dst
.ptr
= (u32
)c
->dst
.val
;
1597 break; /* 64b: zero-ext */
1599 *c
->dst
.ptr
= c
->dst
.val
;
1605 rc
= ops
->cmpxchg_emulated(
1606 (unsigned long)c
->dst
.ptr
,
1613 rc
= ops
->write_emulated(
1614 (unsigned long)c
->dst
.ptr
,
1619 if (rc
== X86EMUL_PROPAGATE_FAULT
)
1621 (unsigned long)c
->dst
.ptr
, err
);
1622 if (rc
!= X86EMUL_CONTINUE
)
1631 return X86EMUL_CONTINUE
;
1634 static inline void emulate_push(struct x86_emulate_ctxt
*ctxt
,
1635 struct x86_emulate_ops
*ops
)
1637 struct decode_cache
*c
= &ctxt
->decode
;
1639 c
->dst
.type
= OP_MEM
;
1640 c
->dst
.bytes
= c
->op_bytes
;
1641 c
->dst
.val
= c
->src
.val
;
1642 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], -c
->op_bytes
);
1643 c
->dst
.ptr
= (void *) register_address(c
, ss_base(ctxt
, ops
),
1644 c
->regs
[VCPU_REGS_RSP
]);
1647 static int emulate_pop(struct x86_emulate_ctxt
*ctxt
,
1648 struct x86_emulate_ops
*ops
,
1649 void *dest
, int len
)
1651 struct decode_cache
*c
= &ctxt
->decode
;
1654 rc
= read_emulated(ctxt
, ops
, register_address(c
, ss_base(ctxt
, ops
),
1655 c
->regs
[VCPU_REGS_RSP
]),
1657 if (rc
!= X86EMUL_CONTINUE
)
1660 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
], len
);
1664 static int emulate_popf(struct x86_emulate_ctxt
*ctxt
,
1665 struct x86_emulate_ops
*ops
,
1666 void *dest
, int len
)
1669 unsigned long val
, change_mask
;
1670 int iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
1671 int cpl
= ops
->cpl(ctxt
->vcpu
);
1673 rc
= emulate_pop(ctxt
, ops
, &val
, len
);
1674 if (rc
!= X86EMUL_CONTINUE
)
1677 change_mask
= EFLG_CF
| EFLG_PF
| EFLG_AF
| EFLG_ZF
| EFLG_SF
| EFLG_OF
1678 | EFLG_TF
| EFLG_DF
| EFLG_NT
| EFLG_RF
| EFLG_AC
| EFLG_ID
;
1680 switch(ctxt
->mode
) {
1681 case X86EMUL_MODE_PROT64
:
1682 case X86EMUL_MODE_PROT32
:
1683 case X86EMUL_MODE_PROT16
:
1685 change_mask
|= EFLG_IOPL
;
1687 change_mask
|= EFLG_IF
;
1689 case X86EMUL_MODE_VM86
:
1691 emulate_gp(ctxt
, 0);
1692 return X86EMUL_PROPAGATE_FAULT
;
1694 change_mask
|= EFLG_IF
;
1696 default: /* real mode */
1697 change_mask
|= (EFLG_IOPL
| EFLG_IF
);
1701 *(unsigned long *)dest
=
1702 (ctxt
->eflags
& ~change_mask
) | (val
& change_mask
);
1707 static void emulate_push_sreg(struct x86_emulate_ctxt
*ctxt
,
1708 struct x86_emulate_ops
*ops
, int seg
)
1710 struct decode_cache
*c
= &ctxt
->decode
;
1712 c
->src
.val
= ops
->get_segment_selector(seg
, ctxt
->vcpu
);
1714 emulate_push(ctxt
, ops
);
1717 static int emulate_pop_sreg(struct x86_emulate_ctxt
*ctxt
,
1718 struct x86_emulate_ops
*ops
, int seg
)
1720 struct decode_cache
*c
= &ctxt
->decode
;
1721 unsigned long selector
;
1724 rc
= emulate_pop(ctxt
, ops
, &selector
, c
->op_bytes
);
1725 if (rc
!= X86EMUL_CONTINUE
)
1728 rc
= load_segment_descriptor(ctxt
, ops
, (u16
)selector
, seg
);
1732 static int emulate_pusha(struct x86_emulate_ctxt
*ctxt
,
1733 struct x86_emulate_ops
*ops
)
1735 struct decode_cache
*c
= &ctxt
->decode
;
1736 unsigned long old_esp
= c
->regs
[VCPU_REGS_RSP
];
1737 int rc
= X86EMUL_CONTINUE
;
1738 int reg
= VCPU_REGS_RAX
;
1740 while (reg
<= VCPU_REGS_RDI
) {
1741 (reg
== VCPU_REGS_RSP
) ?
1742 (c
->src
.val
= old_esp
) : (c
->src
.val
= c
->regs
[reg
]);
1744 emulate_push(ctxt
, ops
);
1746 rc
= writeback(ctxt
, ops
);
1747 if (rc
!= X86EMUL_CONTINUE
)
1753 /* Disable writeback. */
1754 c
->dst
.type
= OP_NONE
;
1759 static int emulate_popa(struct x86_emulate_ctxt
*ctxt
,
1760 struct x86_emulate_ops
*ops
)
1762 struct decode_cache
*c
= &ctxt
->decode
;
1763 int rc
= X86EMUL_CONTINUE
;
1764 int reg
= VCPU_REGS_RDI
;
1766 while (reg
>= VCPU_REGS_RAX
) {
1767 if (reg
== VCPU_REGS_RSP
) {
1768 register_address_increment(c
, &c
->regs
[VCPU_REGS_RSP
],
1773 rc
= emulate_pop(ctxt
, ops
, &c
->regs
[reg
], c
->op_bytes
);
1774 if (rc
!= X86EMUL_CONTINUE
)
1781 static inline int emulate_grp1a(struct x86_emulate_ctxt
*ctxt
,
1782 struct x86_emulate_ops
*ops
)
1784 struct decode_cache
*c
= &ctxt
->decode
;
1786 return emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->dst
.bytes
);
1789 static inline void emulate_grp2(struct x86_emulate_ctxt
*ctxt
)
1791 struct decode_cache
*c
= &ctxt
->decode
;
1792 switch (c
->modrm_reg
) {
1794 emulate_2op_SrcB("rol", c
->src
, c
->dst
, ctxt
->eflags
);
1797 emulate_2op_SrcB("ror", c
->src
, c
->dst
, ctxt
->eflags
);
1800 emulate_2op_SrcB("rcl", c
->src
, c
->dst
, ctxt
->eflags
);
1803 emulate_2op_SrcB("rcr", c
->src
, c
->dst
, ctxt
->eflags
);
1805 case 4: /* sal/shl */
1806 case 6: /* sal/shl */
1807 emulate_2op_SrcB("sal", c
->src
, c
->dst
, ctxt
->eflags
);
1810 emulate_2op_SrcB("shr", c
->src
, c
->dst
, ctxt
->eflags
);
1813 emulate_2op_SrcB("sar", c
->src
, c
->dst
, ctxt
->eflags
);
1818 static inline int emulate_grp3(struct x86_emulate_ctxt
*ctxt
,
1819 struct x86_emulate_ops
*ops
)
1821 struct decode_cache
*c
= &ctxt
->decode
;
1823 switch (c
->modrm_reg
) {
1824 case 0 ... 1: /* test */
1825 emulate_2op_SrcV("test", c
->src
, c
->dst
, ctxt
->eflags
);
1828 c
->dst
.val
= ~c
->dst
.val
;
1831 emulate_1op("neg", c
->dst
, ctxt
->eflags
);
1839 static inline int emulate_grp45(struct x86_emulate_ctxt
*ctxt
,
1840 struct x86_emulate_ops
*ops
)
1842 struct decode_cache
*c
= &ctxt
->decode
;
1844 switch (c
->modrm_reg
) {
1846 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
1849 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
1851 case 2: /* call near abs */ {
1854 c
->eip
= c
->src
.val
;
1855 c
->src
.val
= old_eip
;
1856 emulate_push(ctxt
, ops
);
1859 case 4: /* jmp abs */
1860 c
->eip
= c
->src
.val
;
1863 emulate_push(ctxt
, ops
);
1866 return X86EMUL_CONTINUE
;
1869 static inline int emulate_grp9(struct x86_emulate_ctxt
*ctxt
,
1870 struct x86_emulate_ops
*ops
)
1872 struct decode_cache
*c
= &ctxt
->decode
;
1873 u64 old
= c
->dst
.orig_val64
;
1875 if (((u32
) (old
>> 0) != (u32
) c
->regs
[VCPU_REGS_RAX
]) ||
1876 ((u32
) (old
>> 32) != (u32
) c
->regs
[VCPU_REGS_RDX
])) {
1877 c
->regs
[VCPU_REGS_RAX
] = (u32
) (old
>> 0);
1878 c
->regs
[VCPU_REGS_RDX
] = (u32
) (old
>> 32);
1879 ctxt
->eflags
&= ~EFLG_ZF
;
1881 c
->dst
.val64
= ((u64
)c
->regs
[VCPU_REGS_RCX
] << 32) |
1882 (u32
) c
->regs
[VCPU_REGS_RBX
];
1884 ctxt
->eflags
|= EFLG_ZF
;
1886 return X86EMUL_CONTINUE
;
1889 static int emulate_ret_far(struct x86_emulate_ctxt
*ctxt
,
1890 struct x86_emulate_ops
*ops
)
1892 struct decode_cache
*c
= &ctxt
->decode
;
1896 rc
= emulate_pop(ctxt
, ops
, &c
->eip
, c
->op_bytes
);
1897 if (rc
!= X86EMUL_CONTINUE
)
1899 if (c
->op_bytes
== 4)
1900 c
->eip
= (u32
)c
->eip
;
1901 rc
= emulate_pop(ctxt
, ops
, &cs
, c
->op_bytes
);
1902 if (rc
!= X86EMUL_CONTINUE
)
1904 rc
= load_segment_descriptor(ctxt
, ops
, (u16
)cs
, VCPU_SREG_CS
);
1909 setup_syscalls_segments(struct x86_emulate_ctxt
*ctxt
,
1910 struct x86_emulate_ops
*ops
, struct desc_struct
*cs
,
1911 struct desc_struct
*ss
)
1913 memset(cs
, 0, sizeof(struct desc_struct
));
1914 ops
->get_cached_descriptor(cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
1915 memset(ss
, 0, sizeof(struct desc_struct
));
1917 cs
->l
= 0; /* will be adjusted later */
1918 set_desc_base(cs
, 0); /* flat segment */
1919 cs
->g
= 1; /* 4kb granularity */
1920 set_desc_limit(cs
, 0xfffff); /* 4GB limit */
1921 cs
->type
= 0x0b; /* Read, Execute, Accessed */
1923 cs
->dpl
= 0; /* will be adjusted later */
1927 set_desc_base(ss
, 0); /* flat segment */
1928 set_desc_limit(ss
, 0xfffff); /* 4GB limit */
1929 ss
->g
= 1; /* 4kb granularity */
1931 ss
->type
= 0x03; /* Read/Write, Accessed */
1932 ss
->d
= 1; /* 32bit stack segment */
1938 emulate_syscall(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1940 struct decode_cache
*c
= &ctxt
->decode
;
1941 struct desc_struct cs
, ss
;
1945 /* syscall is not available in real mode */
1946 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
1947 ctxt
->mode
== X86EMUL_MODE_VM86
) {
1949 return X86EMUL_PROPAGATE_FAULT
;
1952 setup_syscalls_segments(ctxt
, ops
, &cs
, &ss
);
1954 ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1956 cs_sel
= (u16
)(msr_data
& 0xfffc);
1957 ss_sel
= (u16
)(msr_data
+ 8);
1959 if (is_long_mode(ctxt
->vcpu
)) {
1963 ops
->set_cached_descriptor(&cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
1964 ops
->set_segment_selector(cs_sel
, VCPU_SREG_CS
, ctxt
->vcpu
);
1965 ops
->set_cached_descriptor(&ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
1966 ops
->set_segment_selector(ss_sel
, VCPU_SREG_SS
, ctxt
->vcpu
);
1968 c
->regs
[VCPU_REGS_RCX
] = c
->eip
;
1969 if (is_long_mode(ctxt
->vcpu
)) {
1970 #ifdef CONFIG_X86_64
1971 c
->regs
[VCPU_REGS_R11
] = ctxt
->eflags
& ~EFLG_RF
;
1973 ops
->get_msr(ctxt
->vcpu
,
1974 ctxt
->mode
== X86EMUL_MODE_PROT64
?
1975 MSR_LSTAR
: MSR_CSTAR
, &msr_data
);
1978 ops
->get_msr(ctxt
->vcpu
, MSR_SYSCALL_MASK
, &msr_data
);
1979 ctxt
->eflags
&= ~(msr_data
| EFLG_RF
);
1983 ops
->get_msr(ctxt
->vcpu
, MSR_STAR
, &msr_data
);
1984 c
->eip
= (u32
)msr_data
;
1986 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
1989 return X86EMUL_CONTINUE
;
1993 emulate_sysenter(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
1995 struct decode_cache
*c
= &ctxt
->decode
;
1996 struct desc_struct cs
, ss
;
2000 /* inject #GP if in real mode */
2001 if (ctxt
->mode
== X86EMUL_MODE_REAL
) {
2002 emulate_gp(ctxt
, 0);
2003 return X86EMUL_PROPAGATE_FAULT
;
2006 if (ctxt
->mode
== X86EMUL_MODE_PROT64
) {
2008 return X86EMUL_PROPAGATE_FAULT
;
2011 setup_syscalls_segments(ctxt
, ops
, &cs
, &ss
);
2013 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2014 switch (ctxt
->mode
) {
2015 case X86EMUL_MODE_PROT32
:
2016 if ((msr_data
& 0xfffc) == 0x0) {
2017 emulate_gp(ctxt
, 0);
2018 return X86EMUL_PROPAGATE_FAULT
;
2021 case X86EMUL_MODE_PROT64
:
2022 if (msr_data
== 0x0) {
2023 emulate_gp(ctxt
, 0);
2024 return X86EMUL_PROPAGATE_FAULT
;
2029 ctxt
->eflags
&= ~(EFLG_VM
| EFLG_IF
| EFLG_RF
);
2030 cs_sel
= (u16
)msr_data
;
2031 cs_sel
&= ~SELECTOR_RPL_MASK
;
2032 ss_sel
= cs_sel
+ 8;
2033 ss_sel
&= ~SELECTOR_RPL_MASK
;
2034 if (ctxt
->mode
== X86EMUL_MODE_PROT64
2035 || is_long_mode(ctxt
->vcpu
)) {
2040 ops
->set_cached_descriptor(&cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2041 ops
->set_segment_selector(cs_sel
, VCPU_SREG_CS
, ctxt
->vcpu
);
2042 ops
->set_cached_descriptor(&ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2043 ops
->set_segment_selector(ss_sel
, VCPU_SREG_SS
, ctxt
->vcpu
);
2045 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_EIP
, &msr_data
);
2048 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_ESP
, &msr_data
);
2049 c
->regs
[VCPU_REGS_RSP
] = msr_data
;
2051 return X86EMUL_CONTINUE
;
2055 emulate_sysexit(struct x86_emulate_ctxt
*ctxt
, struct x86_emulate_ops
*ops
)
2057 struct decode_cache
*c
= &ctxt
->decode
;
2058 struct desc_struct cs
, ss
;
2063 /* inject #GP if in real mode or Virtual 8086 mode */
2064 if (ctxt
->mode
== X86EMUL_MODE_REAL
||
2065 ctxt
->mode
== X86EMUL_MODE_VM86
) {
2066 emulate_gp(ctxt
, 0);
2067 return X86EMUL_PROPAGATE_FAULT
;
2070 setup_syscalls_segments(ctxt
, ops
, &cs
, &ss
);
2072 if ((c
->rex_prefix
& 0x8) != 0x0)
2073 usermode
= X86EMUL_MODE_PROT64
;
2075 usermode
= X86EMUL_MODE_PROT32
;
2079 ops
->get_msr(ctxt
->vcpu
, MSR_IA32_SYSENTER_CS
, &msr_data
);
2081 case X86EMUL_MODE_PROT32
:
2082 cs_sel
= (u16
)(msr_data
+ 16);
2083 if ((msr_data
& 0xfffc) == 0x0) {
2084 emulate_gp(ctxt
, 0);
2085 return X86EMUL_PROPAGATE_FAULT
;
2087 ss_sel
= (u16
)(msr_data
+ 24);
2089 case X86EMUL_MODE_PROT64
:
2090 cs_sel
= (u16
)(msr_data
+ 32);
2091 if (msr_data
== 0x0) {
2092 emulate_gp(ctxt
, 0);
2093 return X86EMUL_PROPAGATE_FAULT
;
2095 ss_sel
= cs_sel
+ 8;
2100 cs_sel
|= SELECTOR_RPL_MASK
;
2101 ss_sel
|= SELECTOR_RPL_MASK
;
2103 ops
->set_cached_descriptor(&cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2104 ops
->set_segment_selector(cs_sel
, VCPU_SREG_CS
, ctxt
->vcpu
);
2105 ops
->set_cached_descriptor(&ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2106 ops
->set_segment_selector(ss_sel
, VCPU_SREG_SS
, ctxt
->vcpu
);
2108 c
->eip
= c
->regs
[VCPU_REGS_RDX
];
2109 c
->regs
[VCPU_REGS_RSP
] = c
->regs
[VCPU_REGS_RCX
];
2111 return X86EMUL_CONTINUE
;
2114 static bool emulator_bad_iopl(struct x86_emulate_ctxt
*ctxt
,
2115 struct x86_emulate_ops
*ops
)
2118 if (ctxt
->mode
== X86EMUL_MODE_REAL
)
2120 if (ctxt
->mode
== X86EMUL_MODE_VM86
)
2122 iopl
= (ctxt
->eflags
& X86_EFLAGS_IOPL
) >> IOPL_SHIFT
;
2123 return ops
->cpl(ctxt
->vcpu
) > iopl
;
2126 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt
*ctxt
,
2127 struct x86_emulate_ops
*ops
,
2130 struct desc_struct tr_seg
;
2133 u8 perm
, bit_idx
= port
& 0x7;
2134 unsigned mask
= (1 << len
) - 1;
2136 ops
->get_cached_descriptor(&tr_seg
, VCPU_SREG_TR
, ctxt
->vcpu
);
2139 if (desc_limit_scaled(&tr_seg
) < 103)
2141 r
= ops
->read_std(get_desc_base(&tr_seg
) + 102, &io_bitmap_ptr
, 2,
2143 if (r
!= X86EMUL_CONTINUE
)
2145 if (io_bitmap_ptr
+ port
/8 > desc_limit_scaled(&tr_seg
))
2147 r
= ops
->read_std(get_desc_base(&tr_seg
) + io_bitmap_ptr
+ port
/8,
2148 &perm
, 1, ctxt
->vcpu
, NULL
);
2149 if (r
!= X86EMUL_CONTINUE
)
2151 if ((perm
>> bit_idx
) & mask
)
2156 static bool emulator_io_permited(struct x86_emulate_ctxt
*ctxt
,
2157 struct x86_emulate_ops
*ops
,
2160 if (emulator_bad_iopl(ctxt
, ops
))
2161 if (!emulator_io_port_access_allowed(ctxt
, ops
, port
, len
))
2166 static void save_state_to_tss16(struct x86_emulate_ctxt
*ctxt
,
2167 struct x86_emulate_ops
*ops
,
2168 struct tss_segment_16
*tss
)
2170 struct decode_cache
*c
= &ctxt
->decode
;
2173 tss
->flag
= ctxt
->eflags
;
2174 tss
->ax
= c
->regs
[VCPU_REGS_RAX
];
2175 tss
->cx
= c
->regs
[VCPU_REGS_RCX
];
2176 tss
->dx
= c
->regs
[VCPU_REGS_RDX
];
2177 tss
->bx
= c
->regs
[VCPU_REGS_RBX
];
2178 tss
->sp
= c
->regs
[VCPU_REGS_RSP
];
2179 tss
->bp
= c
->regs
[VCPU_REGS_RBP
];
2180 tss
->si
= c
->regs
[VCPU_REGS_RSI
];
2181 tss
->di
= c
->regs
[VCPU_REGS_RDI
];
2183 tss
->es
= ops
->get_segment_selector(VCPU_SREG_ES
, ctxt
->vcpu
);
2184 tss
->cs
= ops
->get_segment_selector(VCPU_SREG_CS
, ctxt
->vcpu
);
2185 tss
->ss
= ops
->get_segment_selector(VCPU_SREG_SS
, ctxt
->vcpu
);
2186 tss
->ds
= ops
->get_segment_selector(VCPU_SREG_DS
, ctxt
->vcpu
);
2187 tss
->ldt
= ops
->get_segment_selector(VCPU_SREG_LDTR
, ctxt
->vcpu
);
2190 static int load_state_from_tss16(struct x86_emulate_ctxt
*ctxt
,
2191 struct x86_emulate_ops
*ops
,
2192 struct tss_segment_16
*tss
)
2194 struct decode_cache
*c
= &ctxt
->decode
;
2198 ctxt
->eflags
= tss
->flag
| 2;
2199 c
->regs
[VCPU_REGS_RAX
] = tss
->ax
;
2200 c
->regs
[VCPU_REGS_RCX
] = tss
->cx
;
2201 c
->regs
[VCPU_REGS_RDX
] = tss
->dx
;
2202 c
->regs
[VCPU_REGS_RBX
] = tss
->bx
;
2203 c
->regs
[VCPU_REGS_RSP
] = tss
->sp
;
2204 c
->regs
[VCPU_REGS_RBP
] = tss
->bp
;
2205 c
->regs
[VCPU_REGS_RSI
] = tss
->si
;
2206 c
->regs
[VCPU_REGS_RDI
] = tss
->di
;
2209 * SDM says that segment selectors are loaded before segment
2212 ops
->set_segment_selector(tss
->ldt
, VCPU_SREG_LDTR
, ctxt
->vcpu
);
2213 ops
->set_segment_selector(tss
->es
, VCPU_SREG_ES
, ctxt
->vcpu
);
2214 ops
->set_segment_selector(tss
->cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2215 ops
->set_segment_selector(tss
->ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2216 ops
->set_segment_selector(tss
->ds
, VCPU_SREG_DS
, ctxt
->vcpu
);
2219 * Now load segment descriptors. If fault happenes at this stage
2220 * it is handled in a context of new task
2222 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ldt
, VCPU_SREG_LDTR
);
2223 if (ret
!= X86EMUL_CONTINUE
)
2225 ret
= load_segment_descriptor(ctxt
, ops
, tss
->es
, VCPU_SREG_ES
);
2226 if (ret
!= X86EMUL_CONTINUE
)
2228 ret
= load_segment_descriptor(ctxt
, ops
, tss
->cs
, VCPU_SREG_CS
);
2229 if (ret
!= X86EMUL_CONTINUE
)
2231 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ss
, VCPU_SREG_SS
);
2232 if (ret
!= X86EMUL_CONTINUE
)
2234 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ds
, VCPU_SREG_DS
);
2235 if (ret
!= X86EMUL_CONTINUE
)
2238 return X86EMUL_CONTINUE
;
2241 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2242 			  struct x86_emulate_ops *ops,
2243 			  u16 tss_selector, u16 old_tss_sel,
2244 			  ulong old_tss_base, struct desc_struct *new_desc)
2246 	struct tss_segment_16 tss_seg;
2248 	u32 err, new_tss_base = get_desc_base(new_desc);
2250 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2252 	if (ret == X86EMUL_PROPAGATE_FAULT) {
2253 		emulate_pf(ctxt, old_tss_base, err);
2257 	save_state_to_tss16(ctxt, ops, &tss_seg);
2259 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2261 	if (ret == X86EMUL_PROPAGATE_FAULT) {
2262 		emulate_pf(ctxt, old_tss_base, err);
2266 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2268 	if (ret == X86EMUL_PROPAGATE_FAULT) {
2269 		emulate_pf(ctxt, new_tss_base, err);
2273 	if (old_tss_sel != 0xffff) {
2274 		tss_seg.prev_task_link = old_tss_sel;
2276 		ret = ops->write_std(new_tss_base,
2277 				     &tss_seg.prev_task_link,
2278 				     sizeof tss_seg.prev_task_link,
2280 		if (ret == X86EMUL_PROPAGATE_FAULT) {
2281 			emulate_pf(ctxt, new_tss_base, err);
2286 	return load_state_from_tss16(ctxt, ops, &tss_seg);
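/*
 * Flow summary for task_switch_16() above (its 32-bit twin below is
 * identical in shape): read the outgoing task's TSS and snapshot the
 * current CPU state into it, write it back, read the incoming TSS,
 * chain the back link when old_tss_sel != 0xffff (nested task), and
 * finally load CPU state from the new image.  Every ops->read_std /
 * ops->write_std fault is converted into a guest #PF via emulate_pf().
 */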
2289 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2290 				struct x86_emulate_ops *ops,
2291 				struct tss_segment_32 *tss)
2293 	struct decode_cache *c = &ctxt->decode;
2295 	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
2297 	tss->eflags = ctxt->eflags;
2298 	tss->eax = c->regs[VCPU_REGS_RAX];
2299 	tss->ecx = c->regs[VCPU_REGS_RCX];
2300 	tss->edx = c->regs[VCPU_REGS_RDX];
2301 	tss->ebx = c->regs[VCPU_REGS_RBX];
2302 	tss->esp = c->regs[VCPU_REGS_RSP];
2303 	tss->ebp = c->regs[VCPU_REGS_RBP];
2304 	tss->esi = c->regs[VCPU_REGS_RSI];
2305 	tss->edi = c->regs[VCPU_REGS_RDI];
2307 	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
2308 	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2309 	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
2310 	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2311 	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
2312 	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
2313 	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2316 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2317 				 struct x86_emulate_ops *ops,
2318 				 struct tss_segment_32 *tss)
2320 	struct decode_cache *c = &ctxt->decode;
2323 	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
2324 		emulate_gp(ctxt, 0);
2325 		return X86EMUL_PROPAGATE_FAULT;
2328 	ctxt->eflags = tss->eflags | 2;
2329 	c->regs[VCPU_REGS_RAX] = tss->eax;
2330 	c->regs[VCPU_REGS_RCX] = tss->ecx;
2331 	c->regs[VCPU_REGS_RDX] = tss->edx;
2332 	c->regs[VCPU_REGS_RBX] = tss->ebx;
2333 	c->regs[VCPU_REGS_RSP] = tss->esp;
2334 	c->regs[VCPU_REGS_RBP] = tss->ebp;
2335 	c->regs[VCPU_REGS_RSI] = tss->esi;
2336 	c->regs[VCPU_REGS_RDI] = tss->edi;
2339 	 * SDM says that segment selectors are loaded before segment
2342 	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
2343 	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2344 	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2345 	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2346 	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2347 	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
2348 	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
2351 	 * Now load segment descriptors. If a fault happens at this stage
2352 	 * it is handled in the context of the new task.
2354 	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2355 	if (ret != X86EMUL_CONTINUE)
2357 	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2358 	if (ret != X86EMUL_CONTINUE)
2360 	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2361 	if (ret != X86EMUL_CONTINUE)
2363 	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2364 	if (ret != X86EMUL_CONTINUE)
2366 	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2367 	if (ret != X86EMUL_CONTINUE)
2369 	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2370 	if (ret != X86EMUL_CONTINUE)
2372 	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2373 	if (ret != X86EMUL_CONTINUE)
2376 	return X86EMUL_CONTINUE;
2379 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2380 			  struct x86_emulate_ops *ops,
2381 			  u16 tss_selector, u16 old_tss_sel,
2382 			  ulong old_tss_base, struct desc_struct *new_desc)
2384 	struct tss_segment_32 tss_seg;
2386 	u32 err, new_tss_base = get_desc_base(new_desc);
2388 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2390 	if (ret == X86EMUL_PROPAGATE_FAULT) {
2391 		emulate_pf(ctxt, old_tss_base, err);
2395 	save_state_to_tss32(ctxt, ops, &tss_seg);
2397 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2399 	if (ret == X86EMUL_PROPAGATE_FAULT) {
2400 		emulate_pf(ctxt, old_tss_base, err);
2404 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2406 	if (ret == X86EMUL_PROPAGATE_FAULT) {
2407 		emulate_pf(ctxt, new_tss_base, err);
2411 	if (old_tss_sel != 0xffff) {
2412 		tss_seg.prev_task_link = old_tss_sel;
2414 		ret = ops->write_std(new_tss_base,
2415 				     &tss_seg.prev_task_link,
2416 				     sizeof tss_seg.prev_task_link,
2418 		if (ret == X86EMUL_PROPAGATE_FAULT) {
2419 			emulate_pf(ctxt, new_tss_base, err);
2424 	return load_state_from_tss32(ctxt, ops, &tss_seg);
2427 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2428 				   struct x86_emulate_ops *ops,
2429 				   u16 tss_selector, int reason,
2430 				   bool has_error_code, u32 error_code)
2432 	struct desc_struct curr_tss_desc, next_tss_desc;
2434 	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2435 	ulong old_tss_base =
2436 		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2440 	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2441 	if (ret != X86EMUL_CONTINUE)
2443 	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2444 	if (ret != X86EMUL_CONTINUE)
2448 	if (reason != TASK_SWITCH_IRET) {
2449 		if ((tss_selector & 3) > next_tss_desc.dpl ||
2450 		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2451 			emulate_gp(ctxt, 0);
2452 			return X86EMUL_PROPAGATE_FAULT;
2456 	desc_limit = desc_limit_scaled(&next_tss_desc);
2457 	if (!next_tss_desc.p ||
2458 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2459 	     desc_limit < 0x2b)) {
2460 		emulate_ts(ctxt, tss_selector & 0xfffc);
2461 		return X86EMUL_PROPAGATE_FAULT;
2464 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2465 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2466 		write_segment_descriptor(ctxt, ops, old_tss_sel,
2470 	if (reason == TASK_SWITCH_IRET)
2471 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2473 	/* set back link to prev task only if NT bit is set in eflags;
2474 	   note that old_tss_sel is not used after this point */
2475 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2476 		old_tss_sel = 0xffff;
2478 	if (next_tss_desc.type & 8)
2479 		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2480 				     old_tss_base, &next_tss_desc);
2482 		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2483 				     old_tss_base, &next_tss_desc);
2484 	if (ret != X86EMUL_CONTINUE)
2487 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2488 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2490 	if (reason != TASK_SWITCH_IRET) {
2491 		next_tss_desc.type |= (1 << 1); /* set busy flag */
2492 		write_segment_descriptor(ctxt, ops, tss_selector,
2496 	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2497 	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2498 	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2500 	if (has_error_code) {
2501 		struct decode_cache *c = &ctxt->decode;
2503 		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2505 		c->src.val = (unsigned long) error_code;
2506 		emulate_push(ctxt, ops);
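/*
 * Illustrative values (not from this file): the system-descriptor type
 * codes whose busy bit (bit 1) the task-switch code above toggles.
 * Bit 3 (value 8) distinguishes a 32-bit TSS from a 16-bit one, which
 * is why "next_tss_desc.type & 8" selects task_switch_32().
 */
#define TSS_TYPE_16_AVAIL	0x1	/* 16-bit TSS, available */
#define TSS_TYPE_16_BUSY	0x3	/* 16-bit TSS, busy */
#define TSS_TYPE_32_AVAIL	0x9	/* 32-bit TSS, available */
#define TSS_TYPE_32_BUSY	0xb	/* 32-bit TSS, busy */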
2512 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2513 			 struct x86_emulate_ops *ops,
2514 			 u16 tss_selector, int reason,
2515 			 bool has_error_code, u32 error_code)
2517 	struct decode_cache *c = &ctxt->decode;
2521 	c->dst.type = OP_NONE;
2523 	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2524 				     has_error_code, error_code);
2526 	if (rc == X86EMUL_CONTINUE) {
2527 		rc = writeback(ctxt, ops);
2528 		if (rc == X86EMUL_CONTINUE)
2532 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
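/*
 * Usage sketch (simplified): KVM's task-switch exit handlers reach this
 * entry point via kvm_task_switch() in arch/x86/kvm/x86.c, roughly:
 *
 *	ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
 *				   tss_selector, reason,
 *				   has_error_code, error_code);
 *
 * A non-zero return means the switch could not be emulated and is
 * reported to userspace instead of being completed in the kernel.
 */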
2535 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2536 			    int reg, struct operand *op)
2538 	struct decode_cache *c = &ctxt->decode;
2539 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2541 	register_address_increment(c, &c->regs[reg], df * op->bytes);
2542 	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
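/*
 * Illustrative sketch (not part of emulate.c): the stepping rule that
 * string_addr_inc() implements.  With EFLAGS.DF clear a string operation
 * moves the index register up by the operand size; with DF set it moves
 * down.
 */
static inline unsigned long string_index_step(unsigned long reg,
					      unsigned int op_bytes, int df)
{
	/* df is +1 (DF clear) or -1 (DF set), as computed above */
	return reg + df * (int)op_bytes;
}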
2546 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2549 	struct decode_cache *c = &ctxt->decode;
2550 	int rc = X86EMUL_CONTINUE;
2551 	int saved_dst_type = c->dst.type;
2553 	ctxt->decode.mem_read.pos = 0;
2555 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
2560 	/* LOCK prefix is allowed only with some instructions */
2561 	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
2566 	/* Privileged instructions can only be executed at CPL 0 */
2567 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
2568 		emulate_gp(ctxt, 0);
2572 	if (c->rep_prefix && (c->d & String)) {
2573 		ctxt->restart = true;
2574 		/* All REP prefixes have the same first termination condition */
2575 		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
2577 			ctxt->restart = false;
2581 		/* The second termination condition applies only to REPE
2582 		 * and REPNE. Test whether the repeat string operation prefix
2583 		 * is REPE/REPZ or REPNE/REPNZ and, if so, check the
2584 		 * corresponding termination condition:
2585 		 *   - if REPE/REPZ and ZF = 0 then done
2586 		 *   - if REPNE/REPNZ and ZF = 1 then done
2588 		if ((c->b == 0xa6) || (c->b == 0xa7) ||
2589 		    (c->b == 0xae) || (c->b == 0xaf)) {
2590 			if ((c->rep_prefix == REPE_PREFIX) &&
2591 			    ((ctxt->eflags & EFLG_ZF) == 0))
2593 			if ((c->rep_prefix == REPNE_PREFIX) &&
2594 			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
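		/*
		 * The termination rules above, restated as a host-side
		 * loop (illustrative only; zf stands for EFLAGS.ZF after
		 * each iteration):
		 *
		 *	while (rcx != 0) {
		 *		zf = do_one_string_iteration();
		 *		rcx--;
		 *		if (is_cmps_or_scas) {
		 *			if (repe && !zf)
		 *				break;
		 *			if (repne && zf)
		 *				break;
		 *		}
		 *	}
		 */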
2600 	if (c->src.type == OP_MEM) {
2601 		rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
2602 				   c->src.valptr, c->src.bytes);
2603 		if (rc != X86EMUL_CONTINUE)
2605 		c->src.orig_val64 = c->src.val64;
2608 	if (c->src2.type == OP_MEM) {
2609 		rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
2610 				   &c->src2.val, c->src2.bytes);
2611 		if (rc != X86EMUL_CONTINUE)
2615 	if ((c->d & DstMask) == ImplicitOps)
2619 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
2620 		/* optimisation - avoid slow emulated read if Mov */
2621 		rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
2622 				   &c->dst.val, c->dst.bytes);
2623 		if (rc != X86EMUL_CONTINUE)
2626 	c->dst.orig_val = c->dst.val;
2636 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2638 case 0x06: /* push es */
2639 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2641 case 0x07: /* pop es */
2642 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2643 if (rc
!= X86EMUL_CONTINUE
)
2648 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2650 case 0x0e: /* push cs */
2651 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_CS
);
2655 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2657 case 0x16: /* push ss */
2658 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2660 case 0x17: /* pop ss */
2661 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2662 if (rc
!= X86EMUL_CONTINUE
)
2667 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2669 case 0x1e: /* push ds */
2670 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2672 case 0x1f: /* pop ds */
2673 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2674 if (rc
!= X86EMUL_CONTINUE
)
2679 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2683 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2687 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2691 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
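		/*
		 * Background for the emulate_2op_SrcV() calls in this
		 * opcode range: the macro executes the named instruction
		 * on the host with the guest's operands and EFLAGS image,
		 * so hardware produces both the result and the arithmetic
		 * flags.  Done by hand in C, even a byte-sized add needs
		 * explicit flag bookkeeping (illustrative only):
		 *
		 *	wide = (u16)a + b;
		 *	val  = (u8)wide;
		 *	cf   = wide > 0xff;
		 *	zf   = val == 0;
		 *	sf   = val & 0x80;
		 */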
2693 	case 0x40 ... 0x47: /* inc r16/r32 */
2694 		emulate_1op("inc", c->dst, ctxt->eflags);
2696 	case 0x48 ... 0x4f: /* dec r16/r32 */
2697 		emulate_1op("dec", c->dst, ctxt->eflags);
2699 	case 0x50 ... 0x57: /* push reg */
2700 		emulate_push(ctxt, ops);
2702 	case 0x58 ... 0x5f: /* pop reg */
2704 		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
2705 		if (rc != X86EMUL_CONTINUE)
2708 	case 0x60:	/* pusha */
2709 		rc = emulate_pusha(ctxt, ops);
2710 		if (rc != X86EMUL_CONTINUE)
2713 	case 0x61:	/* popa */
2714 		rc = emulate_popa(ctxt, ops);
2715 		if (rc != X86EMUL_CONTINUE)
2718 	case 0x63:	/* movsxd */
2719 		if (ctxt->mode != X86EMUL_MODE_PROT64)
2720 			goto cannot_emulate;
2721 		c->dst.val = (s32) c->src.val;
2723 	case 0x68:	/* push imm */
2724 	case 0x6a:	/* push imm8 */
2725 		emulate_push(ctxt, ops);
2727 	case 0x6c:	/* insb */
2728 	case 0x6d:	/* insw/insd */
2729 		c->dst.bytes = min(c->dst.bytes, 4u);
2730 		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2732 			emulate_gp(ctxt, 0);
2735 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
2736 				     c->regs[VCPU_REGS_RDX], &c->dst.val))
2737 			goto done; /* IO is needed, skip writeback */
2739 	case 0x6e:	/* outsb */
2740 	case 0x6f:	/* outsw/outsd */
2741 		c->src.bytes = min(c->src.bytes, 4u);
2742 		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2744 			emulate_gp(ctxt, 0);
2747 		ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
2748 				      &c->src.val, 1, ctxt->vcpu);
2750 		c->dst.type = OP_NONE; /* nothing to writeback */
2752 	case 0x70 ... 0x7f: /* jcc (short) */
2753 		if (test_cc(c->b, ctxt->eflags))
2754 			jmp_rel(c, c->src.val);
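		/*
		 * Background for test_cc(): the low nibble of a jcc/setcc/
		 * cmovcc opcode selects the condition, and its lowest bit
		 * inverts the result (0x74 jz vs 0x75 jnz).  A partial,
		 * illustrative restatement:
		 *
		 *	switch ((condition & 15) >> 1) {
		 *	case 0: rc = flags & EFLG_OF; break;
		 *	case 1: rc = flags & EFLG_CF; break;
		 *	case 2: rc = flags & EFLG_ZF; break;
		 *	case 3: rc = flags & (EFLG_CF | EFLG_ZF); break;
		 *	...
		 *	}
		 *	if (condition & 1)
		 *		rc = !rc;
		 */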
2756 	case 0x80 ... 0x83:	/* Grp1 */
2757 		switch (c->modrm_reg) {
2778 		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
2780 	case 0x86 ... 0x87:	/* xchg */
2782 		/* Write back the register source. */
2783 		switch (c->dst.bytes) {
2785 			*(u8 *) c->src.ptr = (u8) c->dst.val;
2788 			*(u16 *) c->src.ptr = (u16) c->dst.val;
2791 			*c->src.ptr = (u32) c->dst.val;
2792 			break;	/* 64b reg: zero-extend */
2794 			*c->src.ptr = c->dst.val;
2798 		 * Write back the memory destination with implicit LOCK
2801 		c->dst.val = c->src.val;
2804 	case 0x88 ... 0x8b:	/* mov */
2806 	case 0x8c:	/* mov r/m, sreg */
2807 		if (c->modrm_reg > VCPU_SREG_GS) {
2811 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
2813 	case 0x8d:	/* lea r16/r32, m */
2814 		c->dst.val = c->modrm_ea;
2816 	case 0x8e: {	/* mov seg, r/m16 */
2821 		if (c->modrm_reg == VCPU_SREG_CS ||
2822 		    c->modrm_reg > VCPU_SREG_GS) {
2827 		if (c->modrm_reg == VCPU_SREG_SS)
2828 			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2830 		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
2832 		c->dst.type = OP_NONE;	/* Disable writeback. */
2835 	case 0x8f:	/* pop (sole member of Grp1a) */
2836 		rc = emulate_grp1a(ctxt, ops);
2837 		if (rc != X86EMUL_CONTINUE)
2840 	case 0x90: /* nop / xchg r8,rax */
2841 		if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
2842 			c->dst.type = OP_NONE;	/* nop */
2845 	case 0x91 ... 0x97: /* xchg reg,rax */
2846 		c->src.type = OP_REG;
2847 		c->src.bytes = c->op_bytes;
2848 		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
2849 		c->src.val = *(c->src.ptr);
2851 	case 0x9c: /* pushf */
2852 		c->src.val = (unsigned long) ctxt->eflags;
2853 		emulate_push(ctxt, ops);
2855 	case 0x9d: /* popf */
2856 		c->dst.type = OP_REG;
2857 		c->dst.ptr = (unsigned long *) &ctxt->eflags;
2858 		c->dst.bytes = c->op_bytes;
2859 		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
2860 		if (rc != X86EMUL_CONTINUE)
2863 	case 0xa0 ... 0xa3:	/* mov */
2864 	case 0xa4 ... 0xa5:	/* movs */
2866 	case 0xa6 ... 0xa7:	/* cmps */
2867 		c->dst.type = OP_NONE; /* Disable writeback. */
2868 		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
2870 	case 0xa8 ... 0xa9:	/* test ax, imm */
2872 	case 0xaa ... 0xab:	/* stos */
2873 		c->dst.val = c->regs[VCPU_REGS_RAX];
2875 	case 0xac ... 0xad:	/* lods */
2877 	case 0xae ... 0xaf:	/* scas */
2878 		DPRINTF("Urk! I don't handle SCAS.\n");
2879 		goto cannot_emulate;
2880 	case 0xb0 ... 0xbf: /* mov r, imm */
2885 	case 0xc3: /* ret */
2886 		c->dst.type = OP_REG;
2887 		c->dst.ptr = &c->eip;
2888 		c->dst.bytes = c->op_bytes;
2889 		goto pop_instruction;
2890 	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
2892 		c->dst.val = c->src.val;
2894 	case 0xcb:	/* ret far */
2895 		rc = emulate_ret_far(ctxt, ops);
2896 		if (rc != X86EMUL_CONTINUE)
2899 	case 0xd0 ... 0xd1:	/* Grp2 */
2903 	case 0xd2 ... 0xd3:	/* Grp2 */
2904 		c->src.val = c->regs[VCPU_REGS_RCX];
2907 	case 0xe4:	/* inb */
2910 	case 0xe6:	/* outb */
2911 	case 0xe7:	/* out */
2913 	case 0xe8: /* call (near) */ {
2914 		long int rel = c->src.val;
2915 		c->src.val = (unsigned long) c->eip;
2917 		emulate_push(ctxt, ops);
2920 	case 0xe9: /* jmp rel */
2922 	case 0xea: { /* jmp far */
2925 		memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2927 		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
2931 		memcpy(&c->eip, c->src.valptr, c->op_bytes);
2935 	jmp:	/* jmp rel short */
2936 		jmp_rel(c, c->src.val);
2937 		c->dst.type = OP_NONE; /* Disable writeback. */
2939 	case 0xec: /* in al,dx */
2940 	case 0xed: /* in (e/r)ax,dx */
2941 		c->src.val = c->regs[VCPU_REGS_RDX];
2943 		c->dst.bytes = min(c->dst.bytes, 4u);
2944 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
2945 			emulate_gp(ctxt, 0);
2948 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
2950 			goto done; /* IO is needed */
2952 	case 0xee: /* out dx,al */
2953 	case 0xef: /* out dx,(e/r)ax */
2954 		c->src.val = c->regs[VCPU_REGS_RDX];
2956 		c->dst.bytes = min(c->dst.bytes, 4u);
2957 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
2958 			emulate_gp(ctxt, 0);
2961 		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
2963 		c->dst.type = OP_NONE;	/* Disable writeback. */
2965 	case 0xf4:	/* hlt */
2966 		ctxt->vcpu->arch.halt_request = 1;
2968 	case 0xf5:	/* cmc */
2969 		/* complement carry flag from eflags reg */
2970 		ctxt->eflags ^= EFLG_CF;
2971 		c->dst.type = OP_NONE;	/* Disable writeback. */
2973 	case 0xf6 ... 0xf7:	/* Grp3 */
2974 		if (!emulate_grp3(ctxt, ops))
2975 			goto cannot_emulate;
2977 	case 0xf8:	/* clc */
2978 		ctxt->eflags &= ~EFLG_CF;
2979 		c->dst.type = OP_NONE;	/* Disable writeback. */
2981 	case 0xfa:	/* cli */
2982 		if (emulator_bad_iopl(ctxt, ops)) {
2983 			emulate_gp(ctxt, 0);
2986 			ctxt->eflags &= ~X86_EFLAGS_IF;
2987 			c->dst.type = OP_NONE;	/* Disable writeback. */
2990 	case 0xfb:	/* sti */
2991 		if (emulator_bad_iopl(ctxt, ops)) {
2992 			emulate_gp(ctxt, 0);
2995 			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2996 			ctxt->eflags |= X86_EFLAGS_IF;
2997 			c->dst.type = OP_NONE;	/* Disable writeback. */
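		/*
		 * Note on the sti case above: setting KVM_X86_SHADOW_INT_STI
		 * models the one-instruction interrupt shadow that follows
		 * STI, so a pending interrupt cannot fire between STI and
		 * the next instruction (the classic "sti; hlt" sequence
		 * relies on this).
		 */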
3000 	case 0xfc:	/* cld */
3001 		ctxt->eflags &= ~EFLG_DF;
3002 		c->dst.type = OP_NONE;	/* Disable writeback. */
3004 	case 0xfd:	/* std */
3005 		ctxt->eflags |= EFLG_DF;
3006 		c->dst.type = OP_NONE;	/* Disable writeback. */
3008 	case 0xfe:	/* Grp4 */
3010 		rc = emulate_grp45(ctxt, ops);
3011 		if (rc != X86EMUL_CONTINUE)
3014 	case 0xff:	/* Grp5 */
3015 		if (c->modrm_reg == 5)
3021 	rc = writeback(ctxt, ops);
3022 	if (rc != X86EMUL_CONTINUE)
3026 	 * restore dst type in case the decoding will be reused
3027 	 * (happens for string instructions)
3029 	c->dst.type = saved_dst_type;
3031 	if ((c->d & SrcMask) == SrcSI)
3032 		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
3033 				VCPU_REGS_RSI, &c->src);
3035 	if ((c->d & DstMask) == DstDI)
3036 		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
3039 	if (c->rep_prefix && (c->d & String)) {
3040 		struct read_cache *rc = &ctxt->decode.io_read;
3041 		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3043 		 * Re-enter guest when pio read ahead buffer is empty or,
3044 		 * if it is not used, after every 1024 iterations.
3046 		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
3047 		    (rc->end != 0 && rc->end == rc->pos))
3048 			ctxt->restart = false;
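		/*
		 * Note on the masking above: !(c->regs[VCPU_REGS_RCX] & 0x3ff)
		 * is true once every 1024 iterations, which bounds how long a
		 * huge REP string run can keep the vcpu inside the emulator
		 * before the guest is re-entered.
		 */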
3051 	 * reset read cache here in case the string instruction is restarted
3054 	ctxt->decode.mem_read.end = 0;
3058 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3062 	case 0x01: /* lgdt, lidt, lmsw */
3063 		switch (c->modrm_reg) {
3065 			unsigned long address;
3067 		case 0: /* vmcall */
3068 			if (c->modrm_mod != 3 || c->modrm_rm != 1)
3069 				goto cannot_emulate;
3071 			rc = kvm_fix_hypercall(ctxt->vcpu);
3072 			if (rc != X86EMUL_CONTINUE)
3075 			/* Let the processor re-execute the fixed hypercall */
3077 			/* Disable writeback. */
3078 			c->dst.type = OP_NONE;
3081 			rc = read_descriptor(ctxt, ops, c->src.ptr,
3082 					     &size, &address, c->op_bytes);
3083 			if (rc != X86EMUL_CONTINUE)
3085 			realmode_lgdt(ctxt->vcpu, size, address);
3086 			/* Disable writeback. */
3087 			c->dst.type = OP_NONE;
3089 		case 3: /* lidt/vmmcall */
3090 			if (c->modrm_mod == 3) {
3091 				switch (c->modrm_rm) {
3093 					rc = kvm_fix_hypercall(ctxt->vcpu);
3094 					if (rc != X86EMUL_CONTINUE)
3098 					goto cannot_emulate;
3101 				rc = read_descriptor(ctxt, ops, c->src.ptr,
3104 				if (rc != X86EMUL_CONTINUE)
3106 				realmode_lidt(ctxt->vcpu, size, address);
3108 			/* Disable writeback. */
3109 			c->dst.type = OP_NONE;
3113 			c->dst.val = ops->get_cr(0, ctxt->vcpu);
3116 			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
3117 				    (c->src.val & 0x0f), ctxt->vcpu);
3118 			c->dst.type = OP_NONE;
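			/*
			 * Note on the lmsw case above: lmsw may only modify
			 * the low four bits of CR0 (PE, MP, EM, TS), which is
			 * exactly what the "~0x0ful" / "& 0x0f" masking
			 * enforces.
			 */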
3120 		case 5: /* not defined */
3124 			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
3125 			/* Disable writeback. */
3126 			c->dst.type = OP_NONE;
3129 			goto cannot_emulate;
3132 	case 0x05:	/* syscall */
3133 		rc = emulate_syscall(ctxt, ops);
3134 		if (rc != X86EMUL_CONTINUE)
3140 		emulate_clts(ctxt->vcpu);
3141 		c->dst.type = OP_NONE;
3143 	case 0x09:	/* wbinvd */
3144 		kvm_emulate_wbinvd(ctxt->vcpu);
3145 		c->dst.type = OP_NONE;
3147 	case 0x08:	/* invd */
3148 	case 0x0d:	/* GrpP (prefetch) */
3149 	case 0x18:	/* Grp16 (prefetch/nop) */
3150 		c->dst.type = OP_NONE;
3152 	case 0x20: /* mov cr, reg */
3153 		switch (c->modrm_reg) {
3160 		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3161 		c->dst.type = OP_NONE;	/* no writeback */
3163 	case 0x21: /* mov from dr to reg */
3164 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3165 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3169 		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
3170 		c->dst.type = OP_NONE;	/* no writeback */
3172 	case 0x22: /* mov reg, cr */
3173 		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
3174 			emulate_gp(ctxt, 0);
3177 		c->dst.type = OP_NONE;
3179 	case 0x23: /* mov from reg to dr */
3180 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3181 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3186 		if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
3187 				((ctxt->mode == X86EMUL_MODE_PROT64) ?
3188 				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3189 			/* #UD condition is already handled by the code above */
3190 			emulate_gp(ctxt, 0);
3194 		c->dst.type = OP_NONE;	/* no writeback */
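		/*
		 * Note on the DR4/DR5 checks above: with CR4.DE set,
		 * accesses to DR4 and DR5 raise #UD; with it clear they
		 * alias DR6 and DR7.
		 */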
3198 		msr_data = (u32)c->regs[VCPU_REGS_RAX]
3199 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
3200 		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3201 			emulate_gp(ctxt, 0);
3204 		rc = X86EMUL_CONTINUE;
3205 		c->dst.type = OP_NONE;
3209 		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3210 			emulate_gp(ctxt, 0);
3213 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3214 			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3216 		rc = X86EMUL_CONTINUE;
3217 		c->dst.type = OP_NONE;
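		/*
		 * wrmsr and rdmsr move the 64-bit MSR value through the
		 * EDX:EAX register pair, as the msr_data arithmetic above
		 * shows.  Illustrative helpers:
		 *
		 *	msr = (u64)eax | ((u64)edx << 32);	join
		 *	eax = (u32)msr;				split
		 *	edx = (u32)(msr >> 32);
		 */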
3219 	case 0x34:	/* sysenter */
3220 		rc = emulate_sysenter(ctxt, ops);
3221 		if (rc != X86EMUL_CONTINUE)
3226 	case 0x35:	/* sysexit */
3227 		rc = emulate_sysexit(ctxt, ops);
3228 		if (rc != X86EMUL_CONTINUE)
3233 	case 0x40 ... 0x4f:	/* cmov */
3234 		c->dst.val = c->dst.orig_val = c->src.val;
3235 		if (!test_cc(c->b, ctxt->eflags))
3236 			c->dst.type = OP_NONE; /* no writeback */
3238 	case 0x80 ... 0x8f: /* jnz rel, etc. */
3239 		if (test_cc(c->b, ctxt->eflags))
3240 			jmp_rel(c, c->src.val);
3241 		c->dst.type = OP_NONE;
3243 	case 0xa0:	/* push fs */
3244 		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3246 	case 0xa1:	/* pop fs */
3247 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3248 		if (rc != X86EMUL_CONTINUE)
3253 		c->dst.type = OP_NONE;
3254 		/* only subword offset */
3255 		c->src.val &= (c->dst.bytes << 3) - 1;
3256 		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3258 	case 0xa4: /* shld imm8, r, r/m */
3259 	case 0xa5: /* shld cl, r, r/m */
3260 		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3262 	case 0xa8:	/* push gs */
3263 		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3265 	case 0xa9:	/* pop gs */
3266 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3267 		if (rc != X86EMUL_CONTINUE)
3272 		/* only subword offset */
3273 		c->src.val &= (c->dst.bytes << 3) - 1;
3274 		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3276 	case 0xac: /* shrd imm8, r, r/m */
3277 	case 0xad: /* shrd cl, r, r/m */
3278 		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
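		/*
		 * Note on the "(c->dst.bytes << 3) - 1" masking used by the
		 * bt/bts/btr/btc cases: it reduces the bit offset modulo
		 * the operand width (the mask is 15, 31 or 63), so the host
		 * instruction run by the emulation macro always sees an
		 * in-range offset.
		 */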
3280 	case 0xae:	/* clflush */
3282 	case 0xb0 ... 0xb1:	/* cmpxchg */
3284 		 * Save real source value, then compare EAX against
3287 		c->src.orig_val = c->src.val;
3288 		c->src.val = c->regs[VCPU_REGS_RAX];
3289 		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3290 		if (ctxt->eflags & EFLG_ZF) {
3291 			/* Success: write back to memory. */
3292 			c->dst.val = c->src.orig_val;
3294 			/* Failure: write the value we saw to EAX. */
3295 			c->dst.type = OP_REG;
3296 			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
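		/*
		 * The cmpxchg semantics implemented above, restated
		 * (illustrative only): the accumulator is compared with
		 * the destination; on match the source is stored,
		 * otherwise the old destination value lands in the
		 * accumulator, and ZF reports which way it went.
		 *
		 *	if (*dst == accum) {
		 *		*dst = src;		ZF = 1
		 *	} else {
		 *		accum = *dst;		ZF = 0
		 *	}
		 */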
3301 		/* only subword offset */
3302 		c->src.val &= (c->dst.bytes << 3) - 1;
3303 		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3305 	case 0xb6 ... 0xb7:	/* movzx */
3306 		c->dst.bytes = c->op_bytes;
3307 		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3310 	case 0xba:	/* Grp8 */
3311 		switch (c->modrm_reg & 3) {
3324 		/* only subword offset */
3325 		c->src.val &= (c->dst.bytes << 3) - 1;
3326 		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3328 	case 0xbe ... 0xbf:	/* movsx */
3329 		c->dst.bytes = c->op_bytes;
3330 		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3333 	case 0xc3:	/* movnti */
3334 		c->dst.bytes = c->op_bytes;
3335 		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3338 	case 0xc7:	/* Grp9 (cmpxchg8b) */
3339 		rc = emulate_grp9(ctxt, ops);
3340 		if (rc != X86EMUL_CONTINUE)
3347 	DPRINTF("Cannot emulate %02x\n", c->b);