KVM: x86 emulator: Add missing decoder flags for xor instructions
arch/x86/kvm/emulate.c
1 /******************************************************************************
2 * emulate.c
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
13 * Avi Kivity <avi@qumranet.com>
14 * Yaniv Kamay <yaniv@qumranet.com>
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
19 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
22 #ifndef __KERNEL__
23 #include <stdio.h>
24 #include <stdint.h>
25 #include <public/xen.h>
26 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
27 #else
28 #include <linux/kvm_host.h>
29 #include "kvm_cache_regs.h"
30 #define DPRINTF(x...) do {} while (0)
31 #endif
32 #include <linux/module.h>
33 #include <asm/kvm_emulate.h>
35 #include "x86.h"
36 #include "tss.h"
39 * Opcode effective-address decode tables.
40 * Note that we only emulate instructions that have at least one memory
41 * operand (excluding implicit stack references). We assume that stack
42 * references and instruction fetches will never occur in special memory
43 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
44 * not be handled.
47 /* Operand sizes: 8-bit operands or specified/overridden size. */
48 #define ByteOp (1<<0) /* 8-bit operands. */
49 /* Destination operand type. */
50 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
51 #define DstReg (2<<1) /* Register operand. */
52 #define DstMem (3<<1) /* Memory operand. */
53 #define DstAcc (4<<1) /* Destination Accumulator */
54 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
 55 #define DstMem64 (6<<1) /* 64-bit memory operand */
56 #define DstMask (7<<1)
57 /* Source operand type. */
58 #define SrcNone (0<<4) /* No source operand. */
59 #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */
60 #define SrcReg (1<<4) /* Register operand. */
61 #define SrcMem (2<<4) /* Memory operand. */
62 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
63 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
64 #define SrcImm (5<<4) /* Immediate operand. */
65 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
66 #define SrcOne (7<<4) /* Implied '1' */
67 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
68 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
69 #define SrcSI (0xa<<4) /* Source is in the DS:RSI */
70 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
71 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
72 #define SrcMask (0xf<<4)
73 /* Generic ModRM decode. */
74 #define ModRM (1<<8)
75 /* Destination is only written; never read. */
76 #define Mov (1<<9)
77 #define BitOp (1<<10)
78 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
79 #define String (1<<12) /* String instruction (rep capable) */
80 #define Stack (1<<13) /* Stack instruction (push/pop) */
81 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
82 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
83 #define GroupMask 0xff /* Group number stored in bits 0:7 */
84 /* Misc flags */
85 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
86 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
87 #define No64 (1<<28)
88 /* Source 2 operand type */
89 #define Src2None (0<<29)
90 #define Src2CL (1<<29)
91 #define Src2ImmByte (2<<29)
92 #define Src2One (3<<29)
93 #define Src2Mask (7<<29)
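/*
 * Illustrative note (editor's addition, not in the original file): each
 * opcode_table[] entry below is just an OR of the flags defined above.
 * For example, opcode 0x01 (add r/m16/32, r16/32) decodes as
 *
 *     DstMem | SrcReg | ModRM | Lock
 *
 * meaning "destination in memory, source in a register, a ModRM byte
 * follows, and a LOCK prefix is legal". The decoder recovers each field
 * with the DstMask/SrcMask/Src2Mask masks.
 */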
95 enum {
96 Group1_80, Group1_81, Group1_82, Group1_83,
97 Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
98 Group8, Group9,
101 static u32 opcode_table[256] = {
102 /* 0x00 - 0x07 */
103 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
104 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
105 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
106 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
107 /* 0x08 - 0x0F */
108 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
109 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
110 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
111 ImplicitOps | Stack | No64, 0,
112 /* 0x10 - 0x17 */
113 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
114 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
115 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
116 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
117 /* 0x18 - 0x1F */
118 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
119 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
120 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
121 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
122 /* 0x20 - 0x27 */
123 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
124 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
 125 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
126 /* 0x28 - 0x2F */
127 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
128 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
129 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
130 /* 0x30 - 0x37 */
131 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
132 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
133 ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
134 /* 0x38 - 0x3F */
135 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
136 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
137 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
138 0, 0,
139 /* 0x40 - 0x47 */
140 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
141 /* 0x48 - 0x4F */
142 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
143 /* 0x50 - 0x57 */
144 SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
145 SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
146 /* 0x58 - 0x5F */
147 DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
148 DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
149 /* 0x60 - 0x67 */
150 ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
 151 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86-64) */,
152 0, 0, 0, 0,
153 /* 0x68 - 0x6F */
154 SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
155 DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
156 SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
157 /* 0x70 - 0x77 */
158 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
159 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
160 /* 0x78 - 0x7F */
161 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
162 SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
163 /* 0x80 - 0x87 */
164 Group | Group1_80, Group | Group1_81,
165 Group | Group1_82, Group | Group1_83,
166 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
167 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
168 /* 0x88 - 0x8F */
169 ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
170 ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
171 DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
172 ImplicitOps | SrcMem | ModRM, Group | Group1A,
173 /* 0x90 - 0x97 */
174 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
175 /* 0x98 - 0x9F */
176 0, 0, SrcImmFAddr | No64, 0,
177 ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
178 /* 0xA0 - 0xA7 */
179 ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
180 ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
181 ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
182 ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
183 /* 0xA8 - 0xAF */
184 DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
185 ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
186 ByteOp | DstDI | String, DstDI | String,
187 /* 0xB0 - 0xB7 */
188 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
189 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
190 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
191 ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
192 /* 0xB8 - 0xBF */
193 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
194 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
195 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
196 DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
197 /* 0xC0 - 0xC7 */
198 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
199 0, ImplicitOps | Stack, 0, 0,
200 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
201 /* 0xC8 - 0xCF */
202 0, 0, 0, ImplicitOps | Stack,
203 ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
204 /* 0xD0 - 0xD7 */
205 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
206 ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
207 0, 0, 0, 0,
208 /* 0xD8 - 0xDF */
209 0, 0, 0, 0, 0, 0, 0, 0,
210 /* 0xE0 - 0xE7 */
211 0, 0, 0, 0,
212 ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
213 ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
214 /* 0xE8 - 0xEF */
215 SrcImm | Stack, SrcImm | ImplicitOps,
216 SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
217 SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
218 SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
219 /* 0xF0 - 0xF7 */
220 0, 0, 0, 0,
221 ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
222 /* 0xF8 - 0xFF */
223 ImplicitOps, 0, ImplicitOps, ImplicitOps,
224 ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
227 static u32 twobyte_table[256] = {
228 /* 0x00 - 0x0F */
229 0, Group | GroupDual | Group7, 0, 0,
230 0, ImplicitOps, ImplicitOps | Priv, 0,
231 ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
232 0, ImplicitOps | ModRM, 0, 0,
233 /* 0x10 - 0x1F */
234 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
235 /* 0x20 - 0x2F */
236 ModRM | ImplicitOps | Priv, ModRM | Priv,
237 ModRM | ImplicitOps | Priv, ModRM | Priv,
238 0, 0, 0, 0,
239 0, 0, 0, 0, 0, 0, 0, 0,
240 /* 0x30 - 0x3F */
241 ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
242 ImplicitOps, ImplicitOps | Priv, 0, 0,
243 0, 0, 0, 0, 0, 0, 0, 0,
244 /* 0x40 - 0x47 */
245 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
246 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
247 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
248 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
249 /* 0x48 - 0x4F */
250 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
251 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
252 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
253 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
254 /* 0x50 - 0x5F */
255 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
256 /* 0x60 - 0x6F */
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
258 /* 0x70 - 0x7F */
259 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
260 /* 0x80 - 0x8F */
261 SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
262 SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
263 /* 0x90 - 0x9F */
264 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
265 /* 0xA0 - 0xA7 */
266 ImplicitOps | Stack, ImplicitOps | Stack,
267 0, DstMem | SrcReg | ModRM | BitOp,
268 DstMem | SrcReg | Src2ImmByte | ModRM,
269 DstMem | SrcReg | Src2CL | ModRM, 0, 0,
270 /* 0xA8 - 0xAF */
271 ImplicitOps | Stack, ImplicitOps | Stack,
272 0, DstMem | SrcReg | ModRM | BitOp | Lock,
273 DstMem | SrcReg | Src2ImmByte | ModRM,
274 DstMem | SrcReg | Src2CL | ModRM,
275 ModRM, 0,
276 /* 0xB0 - 0xB7 */
277 ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
278 0, DstMem | SrcReg | ModRM | BitOp | Lock,
279 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
280 DstReg | SrcMem16 | ModRM | Mov,
281 /* 0xB8 - 0xBF */
282 0, 0,
283 Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
284 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
285 DstReg | SrcMem16 | ModRM | Mov,
286 /* 0xC0 - 0xCF */
287 0, 0, 0, DstMem | SrcReg | ModRM | Mov,
288 0, 0, 0, Group | GroupDual | Group9,
289 0, 0, 0, 0, 0, 0, 0, 0,
290 /* 0xD0 - 0xDF */
291 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
292 /* 0xE0 - 0xEF */
293 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294 /* 0xF0 - 0xFF */
295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
298 static u32 group_table[] = {
299 [Group1_80*8] =
300 ByteOp | DstMem | SrcImm | ModRM | Lock,
301 ByteOp | DstMem | SrcImm | ModRM | Lock,
302 ByteOp | DstMem | SrcImm | ModRM | Lock,
303 ByteOp | DstMem | SrcImm | ModRM | Lock,
304 ByteOp | DstMem | SrcImm | ModRM | Lock,
305 ByteOp | DstMem | SrcImm | ModRM | Lock,
306 ByteOp | DstMem | SrcImm | ModRM | Lock,
307 ByteOp | DstMem | SrcImm | ModRM,
308 [Group1_81*8] =
309 DstMem | SrcImm | ModRM | Lock,
310 DstMem | SrcImm | ModRM | Lock,
311 DstMem | SrcImm | ModRM | Lock,
312 DstMem | SrcImm | ModRM | Lock,
313 DstMem | SrcImm | ModRM | Lock,
314 DstMem | SrcImm | ModRM | Lock,
315 DstMem | SrcImm | ModRM | Lock,
316 DstMem | SrcImm | ModRM,
317 [Group1_82*8] =
318 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
319 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
320 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
321 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
322 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
323 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
324 ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
325 ByteOp | DstMem | SrcImm | ModRM | No64,
326 [Group1_83*8] =
327 DstMem | SrcImmByte | ModRM | Lock,
328 DstMem | SrcImmByte | ModRM | Lock,
329 DstMem | SrcImmByte | ModRM | Lock,
330 DstMem | SrcImmByte | ModRM | Lock,
331 DstMem | SrcImmByte | ModRM | Lock,
332 DstMem | SrcImmByte | ModRM | Lock,
333 DstMem | SrcImmByte | ModRM | Lock,
334 DstMem | SrcImmByte | ModRM,
335 [Group1A*8] =
336 DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
337 [Group3_Byte*8] =
338 ByteOp | SrcImm | DstMem | ModRM, 0,
339 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
340 0, 0, 0, 0,
341 [Group3*8] =
342 DstMem | SrcImm | ModRM, 0,
343 DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
344 0, 0, 0, 0,
345 [Group4*8] =
346 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
347 0, 0, 0, 0, 0, 0,
348 [Group5*8] =
349 DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
350 SrcMem | ModRM | Stack, 0,
351 SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
352 SrcMem | ModRM | Stack, 0,
353 [Group7*8] =
354 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
355 SrcNone | ModRM | DstMem | Mov, 0,
356 SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
357 [Group8*8] =
358 0, 0, 0, 0,
359 DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
360 DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
361 [Group9*8] =
362 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
365 static u32 group2_table[] = {
366 [Group7*8] =
367 SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
368 SrcNone | ModRM | DstMem | Mov, 0,
369 SrcMem16 | ModRM | Mov | Priv, 0,
370 [Group9*8] =
371 0, 0, 0, 0, 0, 0, 0, 0,
374 /* EFLAGS bit definitions. */
375 #define EFLG_ID (1<<21)
376 #define EFLG_VIP (1<<20)
377 #define EFLG_VIF (1<<19)
378 #define EFLG_AC (1<<18)
379 #define EFLG_VM (1<<17)
380 #define EFLG_RF (1<<16)
381 #define EFLG_IOPL (3<<12)
382 #define EFLG_NT (1<<14)
383 #define EFLG_OF (1<<11)
384 #define EFLG_DF (1<<10)
385 #define EFLG_IF (1<<9)
386 #define EFLG_TF (1<<8)
387 #define EFLG_SF (1<<7)
388 #define EFLG_ZF (1<<6)
389 #define EFLG_AF (1<<4)
390 #define EFLG_PF (1<<2)
391 #define EFLG_CF (1<<0)
394 * Instruction emulation:
395 * Most instructions are emulated directly via a fragment of inline assembly
396 * code. This allows us to save/restore EFLAGS and thus very easily pick up
397 * any modified flags.
400 #if defined(CONFIG_X86_64)
401 #define _LO32 "k" /* force 32-bit operand */
402 #define _STK "%%rsp" /* stack pointer */
403 #elif defined(__i386__)
404 #define _LO32 "" /* force 32-bit operand */
405 #define _STK "%%esp" /* stack pointer */
406 #endif
409 * These EFLAGS bits are restored from saved value during emulation, and
410 * any changes are written back to the saved value after emulation.
412 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
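/*
 * Worked arithmetic (editor's addition, not in the original file):
 * EFLAGS_MASK = OF|SF|ZF|AF|PF|CF
 *             = (1<<11)|(1<<7)|(1<<6)|(1<<4)|(1<<2)|(1<<0) = 0x8d5,
 * i.e. exactly the six arithmetic status flags an emulated ALU
 * instruction is allowed to read and modify.
 */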
414 /* Before executing instruction: restore necessary bits in EFLAGS. */
415 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
416 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
417 "movl %"_sav",%"_LO32 _tmp"; " \
418 "push %"_tmp"; " \
419 "push %"_tmp"; " \
420 "movl %"_msk",%"_LO32 _tmp"; " \
421 "andl %"_LO32 _tmp",("_STK"); " \
422 "pushf; " \
423 "notl %"_LO32 _tmp"; " \
424 "andl %"_LO32 _tmp",("_STK"); " \
425 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
426 "pop %"_tmp"; " \
427 "orl %"_LO32 _tmp",("_STK"); " \
428 "popf; " \
429 "pop %"_sav"; "
431 /* After executing instruction: write-back necessary bits in EFLAGS. */
432 #define _POST_EFLAGS(_sav, _msk, _tmp) \
433 /* _sav |= EFLAGS & _msk; */ \
434 "pushf; " \
435 "pop %"_tmp"; " \
436 "andl %"_msk",%"_LO32 _tmp"; " \
437 "orl %"_LO32 _tmp",%"_sav"; "
439 #ifdef CONFIG_X86_64
440 #define ON64(x) x
441 #else
442 #define ON64(x)
443 #endif
445 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
446 do { \
447 __asm__ __volatile__ ( \
448 _PRE_EFLAGS("0", "4", "2") \
449 _op _suffix " %"_x"3,%1; " \
450 _POST_EFLAGS("0", "4", "2") \
451 : "=m" (_eflags), "=m" ((_dst).val), \
452 "=&r" (_tmp) \
453 : _y ((_src).val), "i" (EFLAGS_MASK)); \
454 } while (0)
457 /* Raw emulation: instruction has two explicit operands. */
458 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
459 do { \
460 unsigned long _tmp; \
462 switch ((_dst).bytes) { \
463 case 2: \
464 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
465 break; \
466 case 4: \
467 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
468 break; \
469 case 8: \
470 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
471 break; \
473 } while (0)
475 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
476 do { \
477 unsigned long _tmp; \
478 switch ((_dst).bytes) { \
479 case 1: \
480 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
481 break; \
482 default: \
483 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
484 _wx, _wy, _lx, _ly, _qx, _qy); \
485 break; \
487 } while (0)
489 /* Source operand is byte-sized and may be restricted to just %cl. */
490 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
491 __emulate_2op(_op, _src, _dst, _eflags, \
492 "b", "c", "b", "c", "b", "c", "b", "c")
494 /* Source operand is byte, word, long or quad sized. */
495 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
496 __emulate_2op(_op, _src, _dst, _eflags, \
497 "b", "q", "w", "r", _LO32, "r", "", "r")
499 /* Source operand is word, long or quad sized. */
500 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
501 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
502 "w", "r", _LO32, "r", "", "r")
504 /* Instruction has three operands and one operand is stored in ECX register */
505 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
506 do { \
507 unsigned long _tmp; \
508 _type _clv = (_cl).val; \
509 _type _srcv = (_src).val; \
510 _type _dstv = (_dst).val; \
512 __asm__ __volatile__ ( \
513 _PRE_EFLAGS("0", "5", "2") \
514 _op _suffix " %4,%1 \n" \
515 _POST_EFLAGS("0", "5", "2") \
516 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
517 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
518 ); \
520 (_cl).val = (unsigned long) _clv; \
521 (_src).val = (unsigned long) _srcv; \
522 (_dst).val = (unsigned long) _dstv; \
523 } while (0)
525 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
526 do { \
527 switch ((_dst).bytes) { \
528 case 2: \
529 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
530 "w", unsigned short); \
531 break; \
532 case 4: \
533 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
534 "l", unsigned int); \
535 break; \
536 case 8: \
537 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
538 "q", unsigned long)); \
539 break; \
541 } while (0)
543 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
544 do { \
545 unsigned long _tmp; \
547 __asm__ __volatile__ ( \
548 _PRE_EFLAGS("0", "3", "2") \
549 _op _suffix " %1; " \
550 _POST_EFLAGS("0", "3", "2") \
551 : "=m" (_eflags), "+m" ((_dst).val), \
552 "=&r" (_tmp) \
553 : "i" (EFLAGS_MASK)); \
554 } while (0)
556 /* Instruction has only one explicit operand (no source operand). */
557 #define emulate_1op(_op, _dst, _eflags) \
558 do { \
559 switch ((_dst).bytes) { \
560 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
561 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
562 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
563 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
565 } while (0)
567 /* Fetch next part of the instruction being emulated. */
568 #define insn_fetch(_type, _size, _eip) \
569 ({ unsigned long _x; \
570 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
571 if (rc != X86EMUL_CONTINUE) \
572 goto done; \
573 (_eip) += (_size); \
574 (_type)_x; \
577 #define insn_fetch_arr(_arr, _size, _eip) \
578 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
579 if (rc != X86EMUL_CONTINUE) \
580 goto done; \
581 (_eip) += (_size); \
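/*
 * Usage note (editor's addition, not in the original file):
 * insn_fetch() is a statement expression that yields the fetched value
 * and advances _eip, but it also relies on a local "rc" and a "done:"
 * label in the calling function, e.g.
 *
 *     c->b = insn_fetch(u8, 1, c->eip);   (jumps to "done" on failure)
 */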
584 static inline unsigned long ad_mask(struct decode_cache *c)
586 return (1UL << (c->ad_bytes << 3)) - 1;
589 /* Access/update address held in a register, based on addressing mode. */
590 static inline unsigned long
591 address_mask(struct decode_cache *c, unsigned long reg)
593 if (c->ad_bytes == sizeof(unsigned long))
594 return reg;
595 else
596 return reg & ad_mask(c);
599 static inline unsigned long
600 register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
602 return base + address_mask(c, reg);
605 static inline void
606 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
608 if (c->ad_bytes == sizeof(unsigned long))
609 *reg += inc;
610 else
611 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
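/*
 * Worked example (editor's addition, not in the original file): with
 * 16-bit addressing (ad_bytes == 2), ad_mask() is 0xffff, so
 * register_address_increment() on *reg == 0x1234ffff with inc == 1
 * yields 0x12340000: the low 16 bits wrap while the untouched upper
 * bits of the register are preserved, matching real-mode semantics.
 */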
614 static inline void jmp_rel(struct decode_cache *c, int rel)
616 register_address_increment(c, &c->eip, rel);
619 static void set_seg_override(struct decode_cache *c, int seg)
621 c->has_seg_override = true;
622 c->seg_override = seg;
625 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
626 struct x86_emulate_ops *ops, int seg)
628 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
629 return 0;
631 return ops->get_cached_segment_base(seg, ctxt->vcpu);
634 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
635 struct x86_emulate_ops *ops,
636 struct decode_cache *c)
638 if (!c->has_seg_override)
639 return 0;
641 return seg_base(ctxt, ops, c->seg_override);
644 static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
645 struct x86_emulate_ops *ops)
647 return seg_base(ctxt, ops, VCPU_SREG_ES);
650 static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
651 struct x86_emulate_ops *ops)
653 return seg_base(ctxt, ops, VCPU_SREG_SS);
656 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
657 u32 error, bool valid)
659 ctxt->exception = vec;
660 ctxt->error_code = error;
661 ctxt->error_code_valid = valid;
662 ctxt->restart = false;
665 static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
667 emulate_exception(ctxt, GP_VECTOR, err, true);
670 static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
671 int err)
673 ctxt->cr2 = addr;
674 emulate_exception(ctxt, PF_VECTOR, err, true);
677 static void emulate_ud(struct x86_emulate_ctxt *ctxt)
679 emulate_exception(ctxt, UD_VECTOR, 0, false);
682 static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
684 emulate_exception(ctxt, TS_VECTOR, err, true);
687 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
688 struct x86_emulate_ops *ops,
689 unsigned long eip, u8 *dest)
691 struct fetch_cache *fc = &ctxt->decode.fetch;
692 int rc;
693 int size, cur_size;
695 if (eip == fc->end) {
696 cur_size = fc->end - fc->start;
697 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
698 rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
699 size, ctxt->vcpu, NULL);
700 if (rc != X86EMUL_CONTINUE)
701 return rc;
702 fc->end += size;
704 *dest = fc->data[eip - fc->start];
705 return X86EMUL_CONTINUE;
708 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
709 struct x86_emulate_ops *ops,
710 unsigned long eip, void *dest, unsigned size)
712 int rc;
714 /* x86 instructions are limited to 15 bytes. */
715 if (eip + size - ctxt->eip > 15)
716 return X86EMUL_UNHANDLEABLE;
717 while (size--) {
718 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
719 if (rc != X86EMUL_CONTINUE)
720 return rc;
722 return X86EMUL_CONTINUE;
726 * Given the 'reg' portion of a ModRM byte, and a register block, return a
727 * pointer into the block that addresses the relevant register.
728 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
730 static void *decode_register(u8 modrm_reg, unsigned long *regs,
731 int highbyte_regs)
733 void *p;
735 p = &regs[modrm_reg];
736 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
737 p = (unsigned char *)&regs[modrm_reg & 3] + 1;
738 return p;
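/*
 * Worked example (editor's addition, not in the original file): with no
 * REX prefix, byte registers 4-7 are AH/CH/DH/BH. decode_register(5,
 * regs, 1) therefore returns a pointer to byte 1 of regs[5 & 3], i.e.
 * CH inside RCX, rather than the low byte of regs[5] (RBP).
 */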
741 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
742 struct x86_emulate_ops *ops,
743 void *ptr,
744 u16 *size, unsigned long *address, int op_bytes)
746 int rc;
748 if (op_bytes == 2)
749 op_bytes = 3;
750 *address = 0;
751 rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
752 ctxt->vcpu, NULL);
753 if (rc != X86EMUL_CONTINUE)
754 return rc;
755 rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
756 ctxt->vcpu, NULL);
757 return rc;
760 static int test_cc(unsigned int condition, unsigned int flags)
762 int rc = 0;
764 switch ((condition & 15) >> 1) {
765 case 0: /* o */
766 rc |= (flags & EFLG_OF);
767 break;
768 case 1: /* b/c/nae */
769 rc |= (flags & EFLG_CF);
770 break;
771 case 2: /* z/e */
772 rc |= (flags & EFLG_ZF);
773 break;
774 case 3: /* be/na */
775 rc |= (flags & (EFLG_CF|EFLG_ZF));
776 break;
777 case 4: /* s */
778 rc |= (flags & EFLG_SF);
779 break;
780 case 5: /* p/pe */
781 rc |= (flags & EFLG_PF);
782 break;
783 case 7: /* le/ng */
784 rc |= (flags & EFLG_ZF);
785 /* fall through */
786 case 6: /* l/nge */
787 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
788 break;
791 /* Odd condition identifiers (lsb == 1) have inverted sense. */
792 return (!!rc ^ (condition & 1));
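/*
 * Worked example (editor's addition, not in the original file): for jnz
 * the condition nibble is 0x5, so (condition & 15) >> 1 == 2 selects
 * the ZF test, and the set low bit then inverts the result;
 * test_cc(0x5, flags) is therefore true exactly when ZF is clear.
 */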
795 static void decode_register_operand(struct operand *op,
796 struct decode_cache *c,
797 int inhibit_bytereg)
799 unsigned reg = c->modrm_reg;
800 int highbyte_regs = c->rex_prefix == 0;
802 if (!(c->d & ModRM))
803 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
804 op->type = OP_REG;
805 if ((c->d & ByteOp) && !inhibit_bytereg) {
806 op->ptr = decode_register(reg, c->regs, highbyte_regs);
807 op->val = *(u8 *)op->ptr;
808 op->bytes = 1;
809 } else {
810 op->ptr = decode_register(reg, c->regs, 0);
811 op->bytes = c->op_bytes;
812 switch (op->bytes) {
813 case 2:
814 op->val = *(u16 *)op->ptr;
815 break;
816 case 4:
817 op->val = *(u32 *)op->ptr;
818 break;
819 case 8:
820 op->val = *(u64 *) op->ptr;
821 break;
824 op->orig_val = op->val;
827 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
828 struct x86_emulate_ops *ops)
830 struct decode_cache *c = &ctxt->decode;
831 u8 sib;
832 int index_reg = 0, base_reg = 0, scale;
833 int rc = X86EMUL_CONTINUE;
835 if (c->rex_prefix) {
836 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
837 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
 838 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
841 c->modrm = insn_fetch(u8, 1, c->eip);
842 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
843 c->modrm_reg |= (c->modrm & 0x38) >> 3;
844 c->modrm_rm |= (c->modrm & 0x07);
845 c->modrm_ea = 0;
846 c->use_modrm_ea = 1;
848 if (c->modrm_mod == 3) {
849 c->modrm_ptr = decode_register(c->modrm_rm,
850 c->regs, c->d & ByteOp);
851 c->modrm_val = *(unsigned long *)c->modrm_ptr;
852 return rc;
855 if (c->ad_bytes == 2) {
856 unsigned bx = c->regs[VCPU_REGS_RBX];
857 unsigned bp = c->regs[VCPU_REGS_RBP];
858 unsigned si = c->regs[VCPU_REGS_RSI];
859 unsigned di = c->regs[VCPU_REGS_RDI];
861 /* 16-bit ModR/M decode. */
862 switch (c->modrm_mod) {
863 case 0:
864 if (c->modrm_rm == 6)
865 c->modrm_ea += insn_fetch(u16, 2, c->eip);
866 break;
867 case 1:
868 c->modrm_ea += insn_fetch(s8, 1, c->eip);
869 break;
870 case 2:
871 c->modrm_ea += insn_fetch(u16, 2, c->eip);
872 break;
874 switch (c->modrm_rm) {
875 case 0:
876 c->modrm_ea += bx + si;
877 break;
878 case 1:
879 c->modrm_ea += bx + di;
880 break;
881 case 2:
882 c->modrm_ea += bp + si;
883 break;
884 case 3:
885 c->modrm_ea += bp + di;
886 break;
887 case 4:
888 c->modrm_ea += si;
889 break;
890 case 5:
891 c->modrm_ea += di;
892 break;
893 case 6:
894 if (c->modrm_mod != 0)
895 c->modrm_ea += bp;
896 break;
897 case 7:
898 c->modrm_ea += bx;
899 break;
901 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
902 (c->modrm_rm == 6 && c->modrm_mod != 0))
903 if (!c->has_seg_override)
904 set_seg_override(c, VCPU_SREG_SS);
905 c->modrm_ea = (u16)c->modrm_ea;
906 } else {
907 /* 32/64-bit ModR/M decode. */
908 if ((c->modrm_rm & 7) == 4) {
909 sib = insn_fetch(u8, 1, c->eip);
910 index_reg |= (sib >> 3) & 7;
911 base_reg |= sib & 7;
912 scale = sib >> 6;
914 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
915 c->modrm_ea += insn_fetch(s32, 4, c->eip);
916 else
917 c->modrm_ea += c->regs[base_reg];
918 if (index_reg != 4)
919 c->modrm_ea += c->regs[index_reg] << scale;
920 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
921 if (ctxt->mode == X86EMUL_MODE_PROT64)
922 c->rip_relative = 1;
923 } else
924 c->modrm_ea += c->regs[c->modrm_rm];
925 switch (c->modrm_mod) {
926 case 0:
927 if (c->modrm_rm == 5)
928 c->modrm_ea += insn_fetch(s32, 4, c->eip);
929 break;
930 case 1:
931 c->modrm_ea += insn_fetch(s8, 1, c->eip);
932 break;
933 case 2:
934 c->modrm_ea += insn_fetch(s32, 4, c->eip);
935 break;
938 done:
939 return rc;
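/*
 * Worked example (editor's addition, not in the original file): in
 * 16-bit mode the ModRM byte 0x46 has mod=1, reg=0, rm=6, so the code
 * above fetches one s8 displacement and computes modrm_ea = BP + disp8;
 * because rm == 6 with mod != 0 is BP-relative, the default segment
 * silently becomes SS unless an explicit override prefix was decoded.
 */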
942 static int decode_abs(struct x86_emulate_ctxt *ctxt,
943 struct x86_emulate_ops *ops)
945 struct decode_cache *c = &ctxt->decode;
946 int rc = X86EMUL_CONTINUE;
948 switch (c->ad_bytes) {
949 case 2:
950 c->modrm_ea = insn_fetch(u16, 2, c->eip);
951 break;
952 case 4:
953 c->modrm_ea = insn_fetch(u32, 4, c->eip);
954 break;
955 case 8:
956 c->modrm_ea = insn_fetch(u64, 8, c->eip);
957 break;
959 done:
960 return rc;
964 x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
966 struct decode_cache *c = &ctxt->decode;
967 int rc = X86EMUL_CONTINUE;
968 int mode = ctxt->mode;
969 int def_op_bytes, def_ad_bytes, group;
 972 /* we cannot decode an insn before the previous rep insn completes */
973 WARN_ON(ctxt->restart);
975 c->eip = ctxt->eip;
976 c->fetch.start = c->fetch.end = c->eip;
977 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
979 switch (mode) {
980 case X86EMUL_MODE_REAL:
981 case X86EMUL_MODE_VM86:
982 case X86EMUL_MODE_PROT16:
983 def_op_bytes = def_ad_bytes = 2;
984 break;
985 case X86EMUL_MODE_PROT32:
986 def_op_bytes = def_ad_bytes = 4;
987 break;
988 #ifdef CONFIG_X86_64
989 case X86EMUL_MODE_PROT64:
990 def_op_bytes = 4;
991 def_ad_bytes = 8;
992 break;
993 #endif
994 default:
995 return -1;
998 c->op_bytes = def_op_bytes;
999 c->ad_bytes = def_ad_bytes;
1001 /* Legacy prefixes. */
1002 for (;;) {
1003 switch (c->b = insn_fetch(u8, 1, c->eip)) {
1004 case 0x66: /* operand-size override */
1005 /* switch between 2/4 bytes */
1006 c->op_bytes = def_op_bytes ^ 6;
1007 break;
1008 case 0x67: /* address-size override */
1009 if (mode == X86EMUL_MODE_PROT64)
1010 /* switch between 4/8 bytes */
1011 c->ad_bytes = def_ad_bytes ^ 12;
1012 else
1013 /* switch between 2/4 bytes */
1014 c->ad_bytes = def_ad_bytes ^ 6;
1015 break;
1016 case 0x26: /* ES override */
1017 case 0x2e: /* CS override */
1018 case 0x36: /* SS override */
1019 case 0x3e: /* DS override */
1020 set_seg_override(c, (c->b >> 3) & 3);
1021 break;
1022 case 0x64: /* FS override */
1023 case 0x65: /* GS override */
1024 set_seg_override(c, c->b & 7);
1025 break;
1026 case 0x40 ... 0x4f: /* REX */
1027 if (mode != X86EMUL_MODE_PROT64)
1028 goto done_prefixes;
1029 c->rex_prefix = c->b;
1030 continue;
1031 case 0xf0: /* LOCK */
1032 c->lock_prefix = 1;
1033 break;
1034 case 0xf2: /* REPNE/REPNZ */
1035 c->rep_prefix = REPNE_PREFIX;
1036 break;
1037 case 0xf3: /* REP/REPE/REPZ */
1038 c->rep_prefix = REPE_PREFIX;
1039 break;
1040 default:
1041 goto done_prefixes;
1044 /* Any legacy prefix after a REX prefix nullifies its effect. */
1046 c->rex_prefix = 0;
1049 done_prefixes:
1051 /* REX prefix. */
1052 if (c->rex_prefix)
1053 if (c->rex_prefix & 8)
1054 c->op_bytes = 8; /* REX.W */
1056 /* Opcode byte(s). */
1057 c->d = opcode_table[c->b];
1058 if (c->d == 0) {
1059 /* Two-byte opcode? */
1060 if (c->b == 0x0f) {
1061 c->twobyte = 1;
1062 c->b = insn_fetch(u8, 1, c->eip);
1063 c->d = twobyte_table[c->b];
1067 if (c->d & Group) {
1068 group = c->d & GroupMask;
1069 c->modrm = insn_fetch(u8, 1, c->eip);
1070 --c->eip;
1072 group = (group << 3) + ((c->modrm >> 3) & 7);
1073 if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
1074 c->d = group2_table[group];
1075 else
1076 c->d = group_table[group];
1079 /* Unrecognised? */
1080 if (c->d == 0) {
1081 DPRINTF("Cannot emulate %02x\n", c->b);
1082 return -1;
1085 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
1086 c->op_bytes = 8;
1088 /* ModRM and SIB bytes. */
1089 if (c->d & ModRM)
1090 rc = decode_modrm(ctxt, ops);
1091 else if (c->d & MemAbs)
1092 rc = decode_abs(ctxt, ops);
1093 if (rc != X86EMUL_CONTINUE)
1094 goto done;
1096 if (!c->has_seg_override)
1097 set_seg_override(c, VCPU_SREG_DS);
1099 if (!(!c->twobyte && c->b == 0x8d))
1100 c->modrm_ea += seg_override_base(ctxt, ops, c);
1102 if (c->ad_bytes != 8)
1103 c->modrm_ea = (u32)c->modrm_ea;
1105 if (c->rip_relative)
1106 c->modrm_ea += c->eip;
1109 * Decode and fetch the source operand: register, memory
1110 * or immediate.
1112 switch (c->d & SrcMask) {
1113 case SrcNone:
1114 break;
1115 case SrcReg:
1116 decode_register_operand(&c->src, c, 0);
1117 break;
1118 case SrcMem16:
1119 c->src.bytes = 2;
1120 goto srcmem_common;
1121 case SrcMem32:
1122 c->src.bytes = 4;
1123 goto srcmem_common;
1124 case SrcMem:
1125 c->src.bytes = (c->d & ByteOp) ? 1 :
1126 c->op_bytes;
1127 /* Don't fetch the address for invlpg: it could be unmapped. */
1128 if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
1129 break;
1130 srcmem_common:
1132 * For instructions with a ModR/M byte, switch to register
1133 * access if Mod = 3.
1135 if ((c->d & ModRM) && c->modrm_mod == 3) {
1136 c->src.type = OP_REG;
1137 c->src.val = c->modrm_val;
1138 c->src.ptr = c->modrm_ptr;
1139 break;
1141 c->src.type = OP_MEM;
1142 c->src.ptr = (unsigned long *)c->modrm_ea;
1143 c->src.val = 0;
1144 break;
1145 case SrcImm:
1146 case SrcImmU:
1147 c->src.type = OP_IMM;
1148 c->src.ptr = (unsigned long *)c->eip;
1149 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1150 if (c->src.bytes == 8)
1151 c->src.bytes = 4;
1152 /* NB. Immediates are sign-extended as necessary. */
1153 switch (c->src.bytes) {
1154 case 1:
1155 c->src.val = insn_fetch(s8, 1, c->eip);
1156 break;
1157 case 2:
1158 c->src.val = insn_fetch(s16, 2, c->eip);
1159 break;
1160 case 4:
1161 c->src.val = insn_fetch(s32, 4, c->eip);
1162 break;
1164 if ((c->d & SrcMask) == SrcImmU) {
1165 switch (c->src.bytes) {
1166 case 1:
1167 c->src.val &= 0xff;
1168 break;
1169 case 2:
1170 c->src.val &= 0xffff;
1171 break;
1172 case 4:
1173 c->src.val &= 0xffffffff;
1174 break;
1177 break;
1178 case SrcImmByte:
1179 case SrcImmUByte:
1180 c->src.type = OP_IMM;
1181 c->src.ptr = (unsigned long *)c->eip;
1182 c->src.bytes = 1;
1183 if ((c->d & SrcMask) == SrcImmByte)
1184 c->src.val = insn_fetch(s8, 1, c->eip);
1185 else
1186 c->src.val = insn_fetch(u8, 1, c->eip);
1187 break;
1188 case SrcOne:
1189 c->src.bytes = 1;
1190 c->src.val = 1;
1191 break;
1192 case SrcSI:
1193 c->src.type = OP_MEM;
1194 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1195 c->src.ptr = (unsigned long *)
1196 register_address(c, seg_override_base(ctxt, ops, c),
1197 c->regs[VCPU_REGS_RSI]);
1198 c->src.val = 0;
1199 break;
1200 case SrcImmFAddr:
1201 c->src.type = OP_IMM;
1202 c->src.ptr = (unsigned long *)c->eip;
1203 c->src.bytes = c->op_bytes + 2;
1204 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
1205 break;
1206 case SrcMemFAddr:
1207 c->src.type = OP_MEM;
1208 c->src.ptr = (unsigned long *)c->modrm_ea;
1209 c->src.bytes = c->op_bytes + 2;
1210 break;
1214 * Decode and fetch the second source operand: register, memory
1215 * or immediate.
1217 switch (c->d & Src2Mask) {
1218 case Src2None:
1219 break;
1220 case Src2CL:
1221 c->src2.bytes = 1;
1222 c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte */
1223 break;
1224 case Src2ImmByte:
1225 c->src2.type = OP_IMM;
1226 c->src2.ptr = (unsigned long *)c->eip;
1227 c->src2.bytes = 1;
1228 c->src2.val = insn_fetch(u8, 1, c->eip);
1229 break;
1230 case Src2One:
1231 c->src2.bytes = 1;
1232 c->src2.val = 1;
1233 break;
1236 /* Decode and fetch the destination operand: register or memory. */
1237 switch (c->d & DstMask) {
1238 case ImplicitOps:
1239 /* Special instructions do their own operand decoding. */
1240 return 0;
1241 case DstReg:
1242 decode_register_operand(&c->dst, c,
1243 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
1244 break;
1245 case DstMem:
1246 case DstMem64:
1247 if ((c->d & ModRM) && c->modrm_mod == 3) {
1248 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1249 c->dst.type = OP_REG;
1250 c->dst.val = c->dst.orig_val = c->modrm_val;
1251 c->dst.ptr = c->modrm_ptr;
1252 break;
1254 c->dst.type = OP_MEM;
1255 c->dst.ptr = (unsigned long *)c->modrm_ea;
1256 if ((c->d & DstMask) == DstMem64)
1257 c->dst.bytes = 8;
1258 else
1259 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1260 c->dst.val = 0;
1261 if (c->d & BitOp) {
1262 unsigned long mask = ~(c->dst.bytes * 8 - 1);
1264 c->dst.ptr = (void *)c->dst.ptr +
1265 (c->src.val & mask) / 8;
1267 break;
1268 case DstAcc:
1269 c->dst.type = OP_REG;
1270 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1271 c->dst.ptr = &c->regs[VCPU_REGS_RAX];
1272 switch (c->dst.bytes) {
1273 case 1:
1274 c->dst.val = *(u8 *)c->dst.ptr;
1275 break;
1276 case 2:
1277 c->dst.val = *(u16 *)c->dst.ptr;
1278 break;
1279 case 4:
1280 c->dst.val = *(u32 *)c->dst.ptr;
1281 break;
1282 case 8:
1283 c->dst.val = *(u64 *)c->dst.ptr;
1284 break;
1286 c->dst.orig_val = c->dst.val;
1287 break;
1288 case DstDI:
1289 c->dst.type = OP_MEM;
1290 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
1291 c->dst.ptr = (unsigned long *)
1292 register_address(c, es_base(ctxt, ops),
1293 c->regs[VCPU_REGS_RDI]);
1294 c->dst.val = 0;
1295 break;
1298 done:
1299 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
1302 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1303 struct x86_emulate_ops *ops,
1304 unsigned long addr, void *dest, unsigned size)
1306 int rc;
1307 struct read_cache *mc = &ctxt->decode.mem_read;
1308 u32 err;
1310 while (size) {
1311 int n = min(size, 8u);
1312 size -= n;
1313 if (mc->pos < mc->end)
1314 goto read_cached;
1316 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
1317 ctxt->vcpu);
1318 if (rc == X86EMUL_PROPAGATE_FAULT)
1319 emulate_pf(ctxt, addr, err);
1320 if (rc != X86EMUL_CONTINUE)
1321 return rc;
1322 mc->end += n;
1324 read_cached:
1325 memcpy(dest, mc->data + mc->pos, n);
1326 mc->pos += n;
1327 dest += n;
1328 addr += n;
1330 return X86EMUL_CONTINUE;
1333 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1334 struct x86_emulate_ops *ops,
1335 unsigned int size, unsigned short port,
1336 void *dest)
1338 struct read_cache *rc = &ctxt->decode.io_read;
1340 if (rc->pos == rc->end) { /* refill pio read ahead */
1341 struct decode_cache *c = &ctxt->decode;
1342 unsigned int in_page, n;
1343 unsigned int count = c->rep_prefix ?
1344 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
1345 in_page = (ctxt->eflags & EFLG_DF) ?
1346 offset_in_page(c->regs[VCPU_REGS_RDI]) :
1347 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
1348 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1349 count);
1350 if (n == 0)
1351 n = 1;
1352 rc->pos = rc->end = 0;
1353 if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
1354 return 0;
1355 rc->end = n * size;
1358 memcpy(dest, rc->data + rc->pos, size);
1359 rc->pos += size;
1360 return 1;
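/*
 * Worked example (editor's addition, not in the original file;
 * assumes rc->data is at least 200 bytes): for "rep insb" with
 * ECX == 100, DF clear, and RDI 200 bytes below a page boundary, the
 * refill path above computes n = min(min(200, sizeof(rc->data)) / 1,
 * 100) == 100, so one ops->pio_in_emulated() call buffers all 100
 * bytes, which the following iterations drain from rc->data without
 * further exits.
 */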
1363 static u32 desc_limit_scaled(struct desc_struct *desc)
1365 u32 limit = get_desc_limit(desc);
1367 return desc->g ? (limit << 12) | 0xfff : limit;
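/*
 * Worked example (editor's addition, not in the original file): with
 * the granularity bit set, the 20-bit limit counts 4KiB pages, so a raw
 * limit of 0xfffff scales to (0xfffff << 12) | 0xfff == 0xffffffff,
 * the flat 4GiB limit that setup_syscalls_segments() installs below.
 */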
1370 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1371 struct x86_emulate_ops *ops,
1372 u16 selector, struct desc_ptr *dt)
1374 if (selector & 1 << 2) {
1375 struct desc_struct desc;
1376 memset(dt, 0, sizeof *dt);
1377 if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
1378 return;
1380 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1381 dt->address = get_desc_base(&desc);
1382 } else
1383 ops->get_gdt(dt, ctxt->vcpu);
1386 /* allowed only for 8-byte segment descriptors */
1387 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1388 struct x86_emulate_ops *ops,
1389 u16 selector, struct desc_struct *desc)
1391 struct desc_ptr dt;
1392 u16 index = selector >> 3;
1393 int ret;
1394 u32 err;
1395 ulong addr;
1397 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1399 if (dt.size < index * 8 + 7) {
1400 emulate_gp(ctxt, selector & 0xfffc);
1401 return X86EMUL_PROPAGATE_FAULT;
1403 addr = dt.address + index * 8;
1404 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
1405 if (ret == X86EMUL_PROPAGATE_FAULT)
1406 emulate_pf(ctxt, addr, err);
1408 return ret;
1411 /* allowed only for 8-byte segment descriptors */
1412 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1413 struct x86_emulate_ops *ops,
1414 u16 selector, struct desc_struct *desc)
1416 struct desc_ptr dt;
1417 u16 index = selector >> 3;
1418 u32 err;
1419 ulong addr;
1420 int ret;
1422 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
1424 if (dt.size < index * 8 + 7) {
1425 emulate_gp(ctxt, selector & 0xfffc);
1426 return X86EMUL_PROPAGATE_FAULT;
1429 addr = dt.address + index * 8;
1430 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
1431 if (ret == X86EMUL_PROPAGATE_FAULT)
1432 emulate_pf(ctxt, addr, err);
1434 return ret;
1437 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1438 struct x86_emulate_ops *ops,
1439 u16 selector, int seg)
1441 struct desc_struct seg_desc;
1442 u8 dpl, rpl, cpl;
1443 unsigned err_vec = GP_VECTOR;
1444 u32 err_code = 0;
1445 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1446 int ret;
1448 memset(&seg_desc, 0, sizeof seg_desc);
1450 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1451 || ctxt->mode == X86EMUL_MODE_REAL) {
1452 /* set real mode segment descriptor */
1453 set_desc_base(&seg_desc, selector << 4);
1454 set_desc_limit(&seg_desc, 0xffff);
1455 seg_desc.type = 3;
1456 seg_desc.p = 1;
1457 seg_desc.s = 1;
1458 goto load;
1461 /* NULL selector is not valid for TR, CS and SS */
1462 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1463 && null_selector)
1464 goto exception;
1466 /* TR should be in GDT only */
1467 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1468 goto exception;
1470 if (null_selector) /* for NULL selector skip all following checks */
1471 goto load;
1473 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
1474 if (ret != X86EMUL_CONTINUE)
1475 return ret;
1477 err_code = selector & 0xfffc;
1478 err_vec = GP_VECTOR;
1480 /* can't load system descriptor into segment selector */
1481 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1482 goto exception;
1484 if (!seg_desc.p) {
1485 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1486 goto exception;
1489 rpl = selector & 3;
1490 dpl = seg_desc.dpl;
1491 cpl = ops->cpl(ctxt->vcpu);
1493 switch (seg) {
1494 case VCPU_SREG_SS:
1496 * segment is not a writable data segment, or segment
1497 * selector's RPL != CPL, or descriptor's DPL != CPL
1499 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1500 goto exception;
1501 break;
1502 case VCPU_SREG_CS:
1503 if (!(seg_desc.type & 8))
1504 goto exception;
1506 if (seg_desc.type & 4) {
1507 /* conforming */
1508 if (dpl > cpl)
1509 goto exception;
1510 } else {
1511 /* nonconforming */
1512 if (rpl > cpl || dpl != cpl)
1513 goto exception;
1515 /* CS(RPL) <- CPL */
1516 selector = (selector & 0xfffc) | cpl;
1517 break;
1518 case VCPU_SREG_TR:
1519 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1520 goto exception;
1521 break;
1522 case VCPU_SREG_LDTR:
1523 if (seg_desc.s || seg_desc.type != 2)
1524 goto exception;
1525 break;
1526 default: /* DS, ES, FS, or GS */
1528 * segment is not a data or readable code segment or
1529 * ((segment is a data or nonconforming code segment)
1530 * and (both RPL and CPL > DPL))
1532 if ((seg_desc.type & 0xa) == 0x8 ||
1533 (((seg_desc.type & 0xc) != 0xc) &&
1534 (rpl > dpl && cpl > dpl)))
1535 goto exception;
1536 break;
1539 if (seg_desc.s) {
1540 /* mark segment as accessed */
1541 seg_desc.type |= 1;
1542 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1543 if (ret != X86EMUL_CONTINUE)
1544 return ret;
1546 load:
1547 ops->set_segment_selector(selector, seg, ctxt->vcpu);
1548 ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
1549 return X86EMUL_CONTINUE;
1550 exception:
1551 emulate_exception(ctxt, err_vec, err_code, true);
1552 return X86EMUL_PROPAGATE_FAULT;
1555 static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1556 struct x86_emulate_ops *ops)
1558 struct decode_cache *c = &ctxt->decode;
1560 c->dst.type = OP_MEM;
1561 c->dst.bytes = c->op_bytes;
1562 c->dst.val = c->src.val;
1563 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1564 c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
1565 c->regs[VCPU_REGS_RSP]);
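/*
 * Editor's note (not in the original file): the x86 stack grows
 * downward, so emulate_push() pre-decrements RSP by op_bytes and aims
 * c->dst at SS:RSP; the actual store is deferred to the common
 * writeback() pass, like any other OP_MEM destination.
 */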
1568 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1569 struct x86_emulate_ops *ops,
1570 void *dest, int len)
1572 struct decode_cache *c = &ctxt->decode;
1573 int rc;
1575 rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
1576 c->regs[VCPU_REGS_RSP]),
1577 dest, len);
1578 if (rc != X86EMUL_CONTINUE)
1579 return rc;
1581 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1582 return rc;
1585 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1586 struct x86_emulate_ops *ops,
1587 void *dest, int len)
1589 int rc;
1590 unsigned long val, change_mask;
1591 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1592 int cpl = ops->cpl(ctxt->vcpu);
1594 rc = emulate_pop(ctxt, ops, &val, len);
1595 if (rc != X86EMUL_CONTINUE)
1596 return rc;
1598 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1599 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1601 switch (ctxt->mode) {
1602 case X86EMUL_MODE_PROT64:
1603 case X86EMUL_MODE_PROT32:
1604 case X86EMUL_MODE_PROT16:
1605 if (cpl == 0)
1606 change_mask |= EFLG_IOPL;
1607 if (cpl <= iopl)
1608 change_mask |= EFLG_IF;
1609 break;
1610 case X86EMUL_MODE_VM86:
1611 if (iopl < 3) {
1612 emulate_gp(ctxt, 0);
1613 return X86EMUL_PROPAGATE_FAULT;
1615 change_mask |= EFLG_IF;
1616 break;
1617 default: /* real mode */
1618 change_mask |= (EFLG_IOPL | EFLG_IF);
1619 break;
1622 *(unsigned long *)dest =
1623 (ctxt->eflags & ~change_mask) | (val & change_mask);
1625 return rc;
1628 static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1629 struct x86_emulate_ops *ops, int seg)
1631 struct decode_cache *c = &ctxt->decode;
1633 c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
1635 emulate_push(ctxt, ops);
1638 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1639 struct x86_emulate_ops *ops, int seg)
1641 struct decode_cache *c = &ctxt->decode;
1642 unsigned long selector;
1643 int rc;
1645 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1646 if (rc != X86EMUL_CONTINUE)
1647 return rc;
1649 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1650 return rc;
1653 static void emulate_pusha(struct x86_emulate_ctxt *ctxt,
1654 struct x86_emulate_ops *ops)
1656 struct decode_cache *c = &ctxt->decode;
1657 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1658 int reg = VCPU_REGS_RAX;
1660 while (reg <= VCPU_REGS_RDI) {
1661 (reg == VCPU_REGS_RSP) ?
1662 (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
1664 emulate_push(ctxt, ops);
1665 ++reg;
1669 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1670 struct x86_emulate_ops *ops)
1672 struct decode_cache *c = &ctxt->decode;
1673 int rc = X86EMUL_CONTINUE;
1674 int reg = VCPU_REGS_RDI;
1676 while (reg >= VCPU_REGS_RAX) {
1677 if (reg == VCPU_REGS_RSP) {
1678 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1679 c->op_bytes);
1680 --reg;
1683 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1684 if (rc != X86EMUL_CONTINUE)
1685 break;
1686 --reg;
1688 return rc;
1691 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1692 struct x86_emulate_ops *ops)
1694 struct decode_cache *c = &ctxt->decode;
1696 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1699 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1701 struct decode_cache *c = &ctxt->decode;
1702 switch (c->modrm_reg) {
1703 case 0: /* rol */
1704 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1705 break;
1706 case 1: /* ror */
1707 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1708 break;
1709 case 2: /* rcl */
1710 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1711 break;
1712 case 3: /* rcr */
1713 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1714 break;
1715 case 4: /* sal/shl */
1716 case 6: /* sal/shl */
1717 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1718 break;
1719 case 5: /* shr */
1720 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1721 break;
1722 case 7: /* sar */
1723 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1724 break;
1728 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1729 struct x86_emulate_ops *ops)
1731 struct decode_cache *c = &ctxt->decode;
1733 switch (c->modrm_reg) {
1734 case 0 ... 1: /* test */
1735 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1736 break;
1737 case 2: /* not */
1738 c->dst.val = ~c->dst.val;
1739 break;
1740 case 3: /* neg */
1741 emulate_1op("neg", c->dst, ctxt->eflags);
1742 break;
1743 default:
1744 return 0;
1746 return 1;
1749 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1750 struct x86_emulate_ops *ops)
1752 struct decode_cache *c = &ctxt->decode;
1754 switch (c->modrm_reg) {
1755 case 0: /* inc */
1756 emulate_1op("inc", c->dst, ctxt->eflags);
1757 break;
1758 case 1: /* dec */
1759 emulate_1op("dec", c->dst, ctxt->eflags);
1760 break;
1761 case 2: /* call near abs */ {
1762 long int old_eip;
1763 old_eip = c->eip;
1764 c->eip = c->src.val;
1765 c->src.val = old_eip;
1766 emulate_push(ctxt, ops);
1767 break;
1769 case 4: /* jmp abs */
1770 c->eip = c->src.val;
1771 break;
1772 case 6: /* push */
1773 emulate_push(ctxt, ops);
1774 break;
1776 return X86EMUL_CONTINUE;
1779 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1780 struct x86_emulate_ops *ops)
1782 struct decode_cache *c = &ctxt->decode;
1783 u64 old = c->dst.orig_val;
1785 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1786 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1788 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1789 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1790 ctxt->eflags &= ~EFLG_ZF;
1791 } else {
1792 c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1793 (u32) c->regs[VCPU_REGS_RBX];
1795 ctxt->eflags |= EFLG_ZF;
1797 return X86EMUL_CONTINUE;
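/*
 * Editor's note (not in the original file): emulate_grp9() implements
 * cmpxchg8b semantics: if EDX:EAX equals the 64-bit destination, ZF is
 * set and ECX:EBX is written back through writeback(); otherwise ZF is
 * cleared and the old value is loaded into EDX:EAX. The DstMem64 | Lock
 * entry in group_table[] lets a LOCK prefix make the update atomic.
 */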
1800 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1801 struct x86_emulate_ops *ops)
1803 struct decode_cache *c = &ctxt->decode;
1804 int rc;
1805 unsigned long cs;
1807 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1808 if (rc != X86EMUL_CONTINUE)
1809 return rc;
1810 if (c->op_bytes == 4)
1811 c->eip = (u32)c->eip;
1812 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1813 if (rc != X86EMUL_CONTINUE)
1814 return rc;
1815 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1816 return rc;
1819 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1820 struct x86_emulate_ops *ops)
1822 int rc;
1823 struct decode_cache *c = &ctxt->decode;
1824 u32 err;
1826 switch (c->dst.type) {
1827 case OP_REG:
1828 /* The 4-byte case *is* correct:
1829 * in 64-bit mode we zero-extend.
1831 switch (c->dst.bytes) {
1832 case 1:
1833 *(u8 *)c->dst.ptr = (u8)c->dst.val;
1834 break;
1835 case 2:
1836 *(u16 *)c->dst.ptr = (u16)c->dst.val;
1837 break;
1838 case 4:
1839 *c->dst.ptr = (u32)c->dst.val;
1840 break; /* 64b: zero-ext */
1841 case 8:
1842 *c->dst.ptr = c->dst.val;
1843 break;
1845 break;
1846 case OP_MEM:
1847 if (c->lock_prefix)
1848 rc = ops->cmpxchg_emulated(
1849 (unsigned long)c->dst.ptr,
1850 &c->dst.orig_val,
1851 &c->dst.val,
1852 c->dst.bytes,
1853 &err,
1854 ctxt->vcpu);
1855 else
1856 rc = ops->write_emulated(
1857 (unsigned long)c->dst.ptr,
1858 &c->dst.val,
1859 c->dst.bytes,
1860 &err,
1861 ctxt->vcpu);
1862 if (rc == X86EMUL_PROPAGATE_FAULT)
1863 emulate_pf(ctxt,
1864 (unsigned long)c->dst.ptr, err);
1865 if (rc != X86EMUL_CONTINUE)
1866 return rc;
1867 break;
1868 case OP_NONE:
1869 /* no writeback */
1870 break;
1871 default:
1872 break;
1874 return X86EMUL_CONTINUE;
1877 static inline void
1878 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1879 struct x86_emulate_ops *ops, struct desc_struct *cs,
1880 struct desc_struct *ss)
1882 memset(cs, 0, sizeof(struct desc_struct));
1883 ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
1884 memset(ss, 0, sizeof(struct desc_struct));
1886 cs->l = 0; /* will be adjusted later */
1887 set_desc_base(cs, 0); /* flat segment */
1888 cs->g = 1; /* 4kb granularity */
1889 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1890 cs->type = 0x0b; /* Read, Execute, Accessed */
1891 cs->s = 1;
1892 cs->dpl = 0; /* will be adjusted later */
1893 cs->p = 1;
1894 cs->d = 1;
1896 set_desc_base(ss, 0); /* flat segment */
1897 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1898 ss->g = 1; /* 4kb granularity */
1899 ss->s = 1;
1900 ss->type = 0x03; /* Read/Write, Accessed */
1901 ss->d = 1; /* 32bit stack segment */
1902 ss->dpl = 0;
1903 ss->p = 1;
1906 static int
1907 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1909 struct decode_cache *c = &ctxt->decode;
1910 struct desc_struct cs, ss;
1911 u64 msr_data;
1912 u16 cs_sel, ss_sel;
1914 /* syscall is not available in real mode */
1915 if (ctxt->mode == X86EMUL_MODE_REAL ||
1916 ctxt->mode == X86EMUL_MODE_VM86) {
1917 emulate_ud(ctxt);
1918 return X86EMUL_PROPAGATE_FAULT;
1921 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1923 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1924 msr_data >>= 32;
1925 cs_sel = (u16)(msr_data & 0xfffc);
1926 ss_sel = (u16)(msr_data + 8);
1928 if (is_long_mode(ctxt->vcpu)) {
1929 cs.d = 0;
1930 cs.l = 1;
1932 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1933 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1934 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1935 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1937 c->regs[VCPU_REGS_RCX] = c->eip;
1938 if (is_long_mode(ctxt->vcpu)) {
1939 #ifdef CONFIG_X86_64
1940 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1942 ops->get_msr(ctxt->vcpu,
1943 ctxt->mode == X86EMUL_MODE_PROT64 ?
1944 MSR_LSTAR : MSR_CSTAR, &msr_data);
1945 c->eip = msr_data;
1947 ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1948 ctxt->eflags &= ~(msr_data | EFLG_RF);
1949 #endif
1950 } else {
1951 /* legacy mode */
1952 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1953 c->eip = (u32)msr_data;
1955 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1958 return X86EMUL_CONTINUE;
1961 static int
1962 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1964 struct decode_cache *c = &ctxt->decode;
1965 struct desc_struct cs, ss;
1966 u64 msr_data;
1967 u16 cs_sel, ss_sel;
1969 /* inject #GP if in real mode */
1970 if (ctxt->mode == X86EMUL_MODE_REAL) {
1971 emulate_gp(ctxt, 0);
1972 return X86EMUL_PROPAGATE_FAULT;
1973 }
1975 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1976 * Therefore, we inject an #UD.
1977 */
1978 if (ctxt->mode == X86EMUL_MODE_PROT64) {
1979 emulate_ud(ctxt);
1980 return X86EMUL_PROPAGATE_FAULT;
1983 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1985 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1986 switch (ctxt->mode) {
1987 case X86EMUL_MODE_PROT32:
1988 if ((msr_data & 0xfffc) == 0x0) {
1989 emulate_gp(ctxt, 0);
1990 return X86EMUL_PROPAGATE_FAULT;
1992 break;
1993 case X86EMUL_MODE_PROT64:
1994 if (msr_data == 0x0) {
1995 emulate_gp(ctxt, 0);
1996 return X86EMUL_PROPAGATE_FAULT;
1998 break;
2001 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
2002 cs_sel = (u16)msr_data;
2003 cs_sel &= ~SELECTOR_RPL_MASK;
2004 ss_sel = cs_sel + 8;
2005 ss_sel &= ~SELECTOR_RPL_MASK;
2006 if (ctxt->mode == X86EMUL_MODE_PROT64
2007 || is_long_mode(ctxt->vcpu)) {
2008 cs.d = 0;
2009 cs.l = 1;
2012 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
2013 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
2014 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
2015 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
2017 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
2018 c->eip = msr_data;
2020 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
2021 c->regs[VCPU_REGS_RSP] = msr_data;
2023 return X86EMUL_CONTINUE;
2024 }
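/*
 * Illustrative sketch: SYSENTER derives both target selectors from a
 * single MSR, IA32_SYSENTER_CS. CS is the MSR value with the RPL bits
 * cleared and SS is the following GDT slot, mirroring the code above.
 * demo_sysenter_selectors() is a hypothetical helper.
 */
static inline void demo_sysenter_selectors(u64 sysenter_cs,
					   u16 *cs_sel, u16 *ss_sel)
{
	*cs_sel = (u16)sysenter_cs & ~SELECTOR_RPL_MASK;
	*ss_sel = (*cs_sel + 8) & ~SELECTOR_RPL_MASK;
}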
2026 static int
2027 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2028 {
2029 struct decode_cache *c = &ctxt->decode;
2030 struct desc_struct cs, ss;
2031 u64 msr_data;
2032 int usermode;
2033 u16 cs_sel, ss_sel;
2035 /* inject #GP if in real mode or Virtual 8086 mode */
2036 if (ctxt->mode == X86EMUL_MODE_REAL ||
2037 ctxt->mode == X86EMUL_MODE_VM86) {
2038 emulate_gp(ctxt, 0);
2039 return X86EMUL_PROPAGATE_FAULT;
2042 setup_syscalls_segments(ctxt, ops, &cs, &ss);
2044 if ((c->rex_prefix & 0x8) != 0x0)
2045 usermode = X86EMUL_MODE_PROT64;
2046 else
2047 usermode = X86EMUL_MODE_PROT32;
2049 cs.dpl = 3;
2050 ss.dpl = 3;
2051 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
2052 switch (usermode) {
2053 case X86EMUL_MODE_PROT32:
2054 cs_sel = (u16)(msr_data + 16);
2055 if ((msr_data & 0xfffc) == 0x0) {
2056 emulate_gp(ctxt, 0);
2057 return X86EMUL_PROPAGATE_FAULT;
2059 ss_sel = (u16)(msr_data + 24);
2060 break;
2061 case X86EMUL_MODE_PROT64:
2062 cs_sel = (u16)(msr_data + 32);
2063 if (msr_data == 0x0) {
2064 emulate_gp(ctxt, 0);
2065 return X86EMUL_PROPAGATE_FAULT;
2067 ss_sel = cs_sel + 8;
2068 cs.d = 0;
2069 cs.l = 1;
2070 break;
2072 cs_sel |= SELECTOR_RPL_MASK;
2073 ss_sel |= SELECTOR_RPL_MASK;
2075 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
2076 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
2077 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
2078 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
2080 c->eip = c->regs[VCPU_REGS_RDX];
2081 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
2083 return X86EMUL_CONTINUE;
2084 }
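/*
 * Illustrative sketch: SYSEXIT also starts from IA32_SYSENTER_CS, but
 * the user-mode selectors sit at fixed offsets from it: +16 (CS) and
 * +24 (SS) for a 32-bit return, +32 and +40 for a 64-bit return, with
 * RPL forced to 3 as in the code above. demo_sysexit_selectors() is a
 * hypothetical helper.
 */
static inline void demo_sysexit_selectors(u64 sysenter_cs, bool ret64,
					  u16 *cs_sel, u16 *ss_sel)
{
	*cs_sel = (u16)(sysenter_cs + (ret64 ? 32 : 16)) | SELECTOR_RPL_MASK;
	*ss_sel = (u16)(sysenter_cs + (ret64 ? 40 : 24)) | SELECTOR_RPL_MASK;
}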
2086 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
2087 struct x86_emulate_ops *ops)
2088 {
2089 int iopl;
2090 if (ctxt->mode == X86EMUL_MODE_REAL)
2091 return false;
2092 if (ctxt->mode == X86EMUL_MODE_VM86)
2093 return true;
2094 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2095 return ops->cpl(ctxt->vcpu) > iopl;
2096 }
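/*
 * Illustrative sketch: IOPL lives in bits 13:12 of EFLAGS, so the
 * extraction above is a mask and shift. Worked example: eflags 0x3202
 * has IOPL 3, meaning even CPL 3 code may issue IN/OUT directly.
 * demo_iopl() is a hypothetical helper.
 */
static inline int demo_iopl(unsigned long eflags)
{
	return (eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
}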
2098 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2099 struct x86_emulate_ops *ops,
2100 u16 port, u16 len)
2101 {
2102 struct desc_struct tr_seg;
2103 int r;
2104 u16 io_bitmap_ptr;
2105 u8 perm, bit_idx = port & 0x7;
2106 unsigned mask = (1 << len) - 1;
2108 ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
2109 if (!tr_seg.p)
2110 return false;
2111 if (desc_limit_scaled(&tr_seg) < 103)
2112 return false;
2113 r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
2114 ctxt->vcpu, NULL);
2115 if (r != X86EMUL_CONTINUE)
2116 return false;
2117 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2118 return false;
2119 r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
2120 &perm, 1, ctxt->vcpu, NULL);
2121 if (r != X86EMUL_CONTINUE)
2122 return false;
2123 if ((perm >> bit_idx) & mask)
2124 return false;
2125 return true;
2126 }
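/*
 * Illustrative sketch: the 32-bit TSS stores a 16-bit I/O bitmap base
 * at offset 102. Port N is governed by bit (N & 7) of bitmap byte
 * N / 8, and an access is allowed only if every bit covering its width
 * is clear. demo_io_allowed() is a hypothetical helper performing the
 * same test on an in-memory copy of the bitmap; like the code above,
 * it only examines a single bitmap byte.
 */
static inline bool demo_io_allowed(const u8 *bitmap, u16 port, u16 len)
{
	unsigned int mask = (1 << len) - 1;

	return ((bitmap[port / 8] >> (port & 7)) & mask) == 0;
}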
2128 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2129 struct x86_emulate_ops *ops,
2130 u16 port, u16 len)
2132 if (emulator_bad_iopl(ctxt, ops))
2133 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
2134 return false;
2135 return true;
2138 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2139 struct x86_emulate_ops *ops,
2140 struct tss_segment_16 *tss)
2142 struct decode_cache *c = &ctxt->decode;
2144 tss->ip = c->eip;
2145 tss->flag = ctxt->eflags;
2146 tss->ax = c->regs[VCPU_REGS_RAX];
2147 tss->cx = c->regs[VCPU_REGS_RCX];
2148 tss->dx = c->regs[VCPU_REGS_RDX];
2149 tss->bx = c->regs[VCPU_REGS_RBX];
2150 tss->sp = c->regs[VCPU_REGS_RSP];
2151 tss->bp = c->regs[VCPU_REGS_RBP];
2152 tss->si = c->regs[VCPU_REGS_RSI];
2153 tss->di = c->regs[VCPU_REGS_RDI];
2155 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
2156 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2157 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
2158 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2159 tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2162 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2163 struct x86_emulate_ops *ops,
2164 struct tss_segment_16 *tss)
2166 struct decode_cache *c = &ctxt->decode;
2167 int ret;
2169 c->eip = tss->ip;
2170 ctxt->eflags = tss->flag | 2;
2171 c->regs[VCPU_REGS_RAX] = tss->ax;
2172 c->regs[VCPU_REGS_RCX] = tss->cx;
2173 c->regs[VCPU_REGS_RDX] = tss->dx;
2174 c->regs[VCPU_REGS_RBX] = tss->bx;
2175 c->regs[VCPU_REGS_RSP] = tss->sp;
2176 c->regs[VCPU_REGS_RBP] = tss->bp;
2177 c->regs[VCPU_REGS_RSI] = tss->si;
2178 c->regs[VCPU_REGS_RDI] = tss->di;
2180 /*
2181 * SDM says that segment selectors are loaded before segment
2182 * descriptors
2183 */
2184 ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
2185 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2186 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2187 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2188 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2190 /*
2191 * Now load segment descriptors. If a fault happens at this stage,
2192 * it is handled in the context of the new task.
2193 */
2194 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
2195 if (ret != X86EMUL_CONTINUE)
2196 return ret;
2197 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2198 if (ret != X86EMUL_CONTINUE)
2199 return ret;
2200 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2201 if (ret != X86EMUL_CONTINUE)
2202 return ret;
2203 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2204 if (ret != X86EMUL_CONTINUE)
2205 return ret;
2206 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2207 if (ret != X86EMUL_CONTINUE)
2208 return ret;
2210 return X86EMUL_CONTINUE;
2213 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2214 struct x86_emulate_ops *ops,
2215 u16 tss_selector, u16 old_tss_sel,
2216 ulong old_tss_base, struct desc_struct *new_desc)
2218 struct tss_segment_16 tss_seg;
2219 int ret;
2220 u32 err, new_tss_base = get_desc_base(new_desc);
2222 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2223 &err);
2224 if (ret == X86EMUL_PROPAGATE_FAULT) {
2225 /* FIXME: need to provide precise fault address */
2226 emulate_pf(ctxt, old_tss_base, err);
2227 return ret;
2230 save_state_to_tss16(ctxt, ops, &tss_seg);
2232 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2233 &err);
2234 if (ret == X86EMUL_PROPAGATE_FAULT) {
2235 /* FIXME: need to provide precise fault address */
2236 emulate_pf(ctxt, old_tss_base, err);
2237 return ret;
2240 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2241 &err);
2242 if (ret == X86EMUL_PROPAGATE_FAULT) {
2243 /* FIXME: need to provide precise fault address */
2244 emulate_pf(ctxt, new_tss_base, err);
2245 return ret;
2248 if (old_tss_sel != 0xffff) {
2249 tss_seg.prev_task_link = old_tss_sel;
2251 ret = ops->write_std(new_tss_base,
2252 &tss_seg.prev_task_link,
2253 sizeof tss_seg.prev_task_link,
2254 ctxt->vcpu, &err);
2255 if (ret == X86EMUL_PROPAGATE_FAULT) {
2256 /* FIXME: need to provide precise fault address */
2257 emulate_pf(ctxt, new_tss_base, err);
2258 return ret;
2262 return load_state_from_tss16(ctxt, ops, &tss_seg);
2265 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2266 struct x86_emulate_ops *ops,
2267 struct tss_segment_32 *tss)
2269 struct decode_cache *c = &ctxt->decode;
2271 tss->cr3 = ops->get_cr(3, ctxt->vcpu);
2272 tss->eip = c->eip;
2273 tss->eflags = ctxt->eflags;
2274 tss->eax = c->regs[VCPU_REGS_RAX];
2275 tss->ecx = c->regs[VCPU_REGS_RCX];
2276 tss->edx = c->regs[VCPU_REGS_RDX];
2277 tss->ebx = c->regs[VCPU_REGS_RBX];
2278 tss->esp = c->regs[VCPU_REGS_RSP];
2279 tss->ebp = c->regs[VCPU_REGS_RBP];
2280 tss->esi = c->regs[VCPU_REGS_RSI];
2281 tss->edi = c->regs[VCPU_REGS_RDI];
2283 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
2284 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2285 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
2286 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
2287 tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
2288 tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
2289 tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2292 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2293 struct x86_emulate_ops *ops,
2294 struct tss_segment_32 *tss)
2296 struct decode_cache *c = &ctxt->decode;
2297 int ret;
2299 if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
2300 emulate_gp(ctxt, 0);
2301 return X86EMUL_PROPAGATE_FAULT;
2303 c->eip = tss->eip;
2304 ctxt->eflags = tss->eflags | 2;
2305 c->regs[VCPU_REGS_RAX] = tss->eax;
2306 c->regs[VCPU_REGS_RCX] = tss->ecx;
2307 c->regs[VCPU_REGS_RDX] = tss->edx;
2308 c->regs[VCPU_REGS_RBX] = tss->ebx;
2309 c->regs[VCPU_REGS_RSP] = tss->esp;
2310 c->regs[VCPU_REGS_RBP] = tss->ebp;
2311 c->regs[VCPU_REGS_RSI] = tss->esi;
2312 c->regs[VCPU_REGS_RDI] = tss->edi;
2314 /*
2315 * SDM says that segment selectors are loaded before segment
2316 * descriptors
2317 */
2318 ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
2319 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2320 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2321 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2322 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2323 ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
2324 ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
2326 /*
2327 * Now load segment descriptors. If a fault happens at this stage,
2328 * it is handled in the context of the new task.
2329 */
2330 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2331 if (ret != X86EMUL_CONTINUE)
2332 return ret;
2333 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2334 if (ret != X86EMUL_CONTINUE)
2335 return ret;
2336 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2337 if (ret != X86EMUL_CONTINUE)
2338 return ret;
2339 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2340 if (ret != X86EMUL_CONTINUE)
2341 return ret;
2342 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2343 if (ret != X86EMUL_CONTINUE)
2344 return ret;
2345 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2346 if (ret != X86EMUL_CONTINUE)
2347 return ret;
2348 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2349 if (ret != X86EMUL_CONTINUE)
2350 return ret;
2352 return X86EMUL_CONTINUE;
2355 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2356 struct x86_emulate_ops *ops,
2357 u16 tss_selector, u16 old_tss_sel,
2358 ulong old_tss_base, struct desc_struct *new_desc)
2360 struct tss_segment_32 tss_seg;
2361 int ret;
2362 u32 err, new_tss_base = get_desc_base(new_desc);
2364 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2365 &err);
2366 if (ret == X86EMUL_PROPAGATE_FAULT) {
2367 /* FIXME: need to provide precise fault address */
2368 emulate_pf(ctxt, old_tss_base, err);
2369 return ret;
2372 save_state_to_tss32(ctxt, ops, &tss_seg);
2374 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2375 &err);
2376 if (ret == X86EMUL_PROPAGATE_FAULT) {
2377 /* FIXME: need to provide precise fault address */
2378 emulate_pf(ctxt, old_tss_base, err);
2379 return ret;
2382 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2383 &err);
2384 if (ret == X86EMUL_PROPAGATE_FAULT) {
2385 /* FIXME: need to provide precise fault address */
2386 emulate_pf(ctxt, new_tss_base, err);
2387 return ret;
2390 if (old_tss_sel != 0xffff) {
2391 tss_seg.prev_task_link = old_tss_sel;
2393 ret = ops->write_std(new_tss_base,
2394 &tss_seg.prev_task_link,
2395 sizeof tss_seg.prev_task_link,
2396 ctxt->vcpu, &err);
2397 if (ret == X86EMUL_PROPAGATE_FAULT) {
2398 /* FIXME: need to provide precise fault address */
2399 emulate_pf(ctxt, new_tss_base, err);
2400 return ret;
2404 return load_state_from_tss32(ctxt, ops, &tss_seg);
2407 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2408 struct x86_emulate_ops *ops,
2409 u16 tss_selector, int reason,
2410 bool has_error_code, u32 error_code)
2411 {
2412 struct desc_struct curr_tss_desc, next_tss_desc;
2413 int ret;
2414 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2415 ulong old_tss_base =
2416 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2417 u32 desc_limit;
2419 /* FIXME: old_tss_base == ~0 ? */
2421 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2422 if (ret != X86EMUL_CONTINUE)
2423 return ret;
2424 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2425 if (ret != X86EMUL_CONTINUE)
2426 return ret;
2428 /* FIXME: check that next_tss_desc is tss */
2430 if (reason != TASK_SWITCH_IRET) {
2431 if ((tss_selector & 3) > next_tss_desc.dpl ||
2432 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2433 emulate_gp(ctxt, 0);
2434 return X86EMUL_PROPAGATE_FAULT;
2438 desc_limit = desc_limit_scaled(&next_tss_desc);
2439 if (!next_tss_desc.p ||
2440 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2441 desc_limit < 0x2b)) {
2442 emulate_ts(ctxt, tss_selector & 0xfffc);
2443 return X86EMUL_PROPAGATE_FAULT;
2446 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2447 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2448 write_segment_descriptor(ctxt, ops, old_tss_sel,
2449 &curr_tss_desc);
2452 if (reason == TASK_SWITCH_IRET)
2453 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2455 /* set back link to prev task only if NT bit is set in eflags;
2456 note that old_tss_sel is not used after this point */
2457 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2458 old_tss_sel = 0xffff;
2460 if (next_tss_desc.type & 8)
2461 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2462 old_tss_base, &next_tss_desc);
2463 else
2464 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2465 old_tss_base, &next_tss_desc);
2466 if (ret != X86EMUL_CONTINUE)
2467 return ret;
2469 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2470 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2472 if (reason != TASK_SWITCH_IRET) {
2473 next_tss_desc.type |= (1 << 1); /* set busy flag */
2474 write_segment_descriptor(ctxt, ops, tss_selector,
2475 &next_tss_desc);
2478 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2479 ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2480 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2482 if (has_error_code) {
2483 struct decode_cache *c = &ctxt->decode;
2485 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2486 c->lock_prefix = 0;
2487 c->src.val = (unsigned long) error_code;
2488 emulate_push(ctxt, ops);
2489 }
2491 return ret;
2492 }
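/*
 * Illustrative sketch: bit 1 of a system descriptor's type field is
 * the TSS busy bit (type 9 = available 32-bit TSS, type 11 = busy).
 * The task-switch code above clears it on the outgoing TSS for
 * JMP/IRET and sets it on the incoming TSS for everything but IRET.
 * demo_set_tss_busy() is a hypothetical helper showing the bit
 * manipulation.
 */
static inline void demo_set_tss_busy(struct desc_struct *tss_desc, bool busy)
{
	if (busy)
		tss_desc->type |= (1 << 1);	/* e.g. 9 -> 11 */
	else
		tss_desc->type &= ~(1 << 1);	/* e.g. 11 -> 9 */
}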
2494 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2495 struct x86_emulate_ops *ops,
2496 u16 tss_selector, int reason,
2497 bool has_error_code, u32 error_code)
2499 struct decode_cache *c = &ctxt->decode;
2500 int rc;
2502 c->eip = ctxt->eip;
2503 c->dst.type = OP_NONE;
2505 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2506 has_error_code, error_code);
2508 if (rc == X86EMUL_CONTINUE) {
2509 rc = writeback(ctxt, ops);
2510 if (rc == X86EMUL_CONTINUE)
2511 ctxt->eip = c->eip;
2514 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2517 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2518 int reg, struct operand *op)
2519 {
2520 struct decode_cache *c = &ctxt->decode;
2521 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2523 register_address_increment(c, &c->regs[reg], df * op->bytes);
2524 op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
2525 }
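/*
 * Illustrative sketch: string instructions step RSI/RDI by the operand
 * size, and the direction flag picks the sign: DF clear walks up, DF
 * set (after STD) walks down, exactly as computed above. The address
 * mask applied by register_address_increment() is left out here.
 * demo_string_step() is a hypothetical helper.
 */
static inline void demo_string_step(unsigned long *reg, int op_bytes,
				    unsigned long eflags)
{
	*reg += (eflags & EFLG_DF) ? -op_bytes : op_bytes;
}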
2527 int
2528 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
2529 {
2530 u64 msr_data;
2531 struct decode_cache *c = &ctxt->decode;
2532 int rc = X86EMUL_CONTINUE;
2533 int saved_dst_type = c->dst.type;
2535 ctxt->decode.mem_read.pos = 0;
2537 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
2538 emulate_ud(ctxt);
2539 goto done;
2542 /* LOCK prefix is allowed only with some instructions */
2543 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
2544 emulate_ud(ctxt);
2545 goto done;
2548 /* Privileged instruction can be executed only in CPL=0 */
2549 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
2550 emulate_gp(ctxt, 0);
2551 goto done;
2554 if (c->rep_prefix && (c->d & String)) {
2555 ctxt->restart = true;
2556 /* All REP prefixes have the same first termination condition */
2557 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
2558 string_done:
2559 ctxt->restart = false;
2560 ctxt->eip = c->eip;
2561 goto done;
2562 }
2563 /* The second termination condition applies only to REPE
2564 * and REPNE. Check whether the repeat-string prefix is
2565 * REPE/REPZ or REPNE/REPNZ, and if so test the
2566 * corresponding termination condition:
2567 * - if REPE/REPZ and ZF = 0 then done
2568 * - if REPNE/REPNZ and ZF = 1 then done
2569 */
2570 if ((c->b == 0xa6) || (c->b == 0xa7) ||
2571 (c->b == 0xae) || (c->b == 0xaf)) {
2572 if ((c->rep_prefix == REPE_PREFIX) &&
2573 ((ctxt->eflags & EFLG_ZF) == 0))
2574 goto string_done;
2575 if ((c->rep_prefix == REPNE_PREFIX) &&
2576 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
2577 goto string_done;
2579 c->eip = ctxt->eip;
2582 if (c->src.type == OP_MEM) {
2583 rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
2584 c->src.valptr, c->src.bytes);
2585 if (rc != X86EMUL_CONTINUE)
2586 goto done;
2587 c->src.orig_val = c->src.val;
2590 if (c->src2.type == OP_MEM) {
2591 rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
2592 &c->src2.val, c->src2.bytes);
2593 if (rc != X86EMUL_CONTINUE)
2594 goto done;
2597 if ((c->d & DstMask) == ImplicitOps)
2598 goto special_insn;
2601 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
2602 /* optimisation - avoid slow emulated read if Mov */
2603 rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
2604 &c->dst.val, c->dst.bytes);
2605 if (rc != X86EMUL_CONTINUE)
2606 goto done;
2608 c->dst.orig_val = c->dst.val;
2610 special_insn:
2612 if (c->twobyte)
2613 goto twobyte_insn;
2615 switch (c->b) {
2616 case 0x00 ... 0x05:
2617 add: /* add */
2618 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
2619 break;
2620 case 0x06: /* push es */
2621 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
2622 break;
2623 case 0x07: /* pop es */
2624 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
2625 if (rc != X86EMUL_CONTINUE)
2626 goto done;
2627 break;
2628 case 0x08 ... 0x0d:
2629 or: /* or */
2630 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2631 break;
2632 case 0x0e: /* push cs */
2633 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
2634 break;
2635 case 0x10 ... 0x15:
2636 adc: /* adc */
2637 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
2638 break;
2639 case 0x16: /* push ss */
2640 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
2641 break;
2642 case 0x17: /* pop ss */
2643 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
2644 if (rc != X86EMUL_CONTINUE)
2645 goto done;
2646 break;
2647 case 0x18 ... 0x1d:
2648 sbb: /* sbb */
2649 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
2650 break;
2651 case 0x1e: /* push ds */
2652 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
2653 break;
2654 case 0x1f: /* pop ds */
2655 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
2656 if (rc != X86EMUL_CONTINUE)
2657 goto done;
2658 break;
2659 case 0x20 ... 0x25:
2660 and: /* and */
2661 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
2662 break;
2663 case 0x28 ... 0x2d:
2664 sub: /* sub */
2665 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
2666 break;
2667 case 0x30 ... 0x35:
2668 xor: /* xor */
2669 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
2670 break;
2671 case 0x38 ... 0x3d:
2672 cmp: /* cmp */
2673 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
2674 break;
2675 case 0x40 ... 0x47: /* inc r16/r32 */
2676 emulate_1op("inc", c->dst, ctxt->eflags);
2677 break;
2678 case 0x48 ... 0x4f: /* dec r16/r32 */
2679 emulate_1op("dec", c->dst, ctxt->eflags);
2680 break;
2681 case 0x50 ... 0x57: /* push reg */
2682 emulate_push(ctxt, ops);
2683 break;
2684 case 0x58 ... 0x5f: /* pop reg */
2685 pop_instruction:
2686 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
2687 if (rc != X86EMUL_CONTINUE)
2688 goto done;
2689 break;
2690 case 0x60: /* pusha */
2691 emulate_pusha(ctxt, ops);
2692 break;
2693 case 0x61: /* popa */
2694 rc = emulate_popa(ctxt, ops);
2695 if (rc != X86EMUL_CONTINUE)
2696 goto done;
2697 break;
2698 case 0x63: /* movsxd */
2699 if (ctxt->mode != X86EMUL_MODE_PROT64)
2700 goto cannot_emulate;
2701 c->dst.val = (s32) c->src.val;
2702 break;
2703 case 0x68: /* push imm */
2704 case 0x6a: /* push imm8 */
2705 emulate_push(ctxt, ops);
2706 break;
2707 case 0x6c: /* insb */
2708 case 0x6d: /* insw/insd */
2709 c->dst.bytes = min(c->dst.bytes, 4u);
2710 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2711 c->dst.bytes)) {
2712 emulate_gp(ctxt, 0);
2713 goto done;
2715 if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
2716 c->regs[VCPU_REGS_RDX], &c->dst.val))
2717 goto done; /* IO is needed, skip writeback */
2718 break;
2719 case 0x6e: /* outsb */
2720 case 0x6f: /* outsw/outsd */
2721 c->src.bytes = min(c->src.bytes, 4u);
2722 if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
2723 c->src.bytes)) {
2724 emulate_gp(ctxt, 0);
2725 goto done;
2727 ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
2728 &c->src.val, 1, ctxt->vcpu);
2730 c->dst.type = OP_NONE; /* nothing to writeback */
2731 break;
2732 case 0x70 ... 0x7f: /* jcc (short) */
2733 if (test_cc(c->b, ctxt->eflags))
2734 jmp_rel(c, c->src.val);
2735 break;
2736 case 0x80 ... 0x83: /* Grp1 */
2737 switch (c->modrm_reg) {
2738 case 0:
2739 goto add;
2740 case 1:
2741 goto or;
2742 case 2:
2743 goto adc;
2744 case 3:
2745 goto sbb;
2746 case 4:
2747 goto and;
2748 case 5:
2749 goto sub;
2750 case 6:
2751 goto xor;
2752 case 7:
2753 goto cmp;
2755 break;
2756 case 0x84 ... 0x85:
2757 test:
2758 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
2759 break;
2760 case 0x86 ... 0x87: /* xchg */
2761 xchg:
2762 /* Write back the register source. */
2763 switch (c->dst.bytes) {
2764 case 1:
2765 *(u8 *) c->src.ptr = (u8) c->dst.val;
2766 break;
2767 case 2:
2768 *(u16 *) c->src.ptr = (u16) c->dst.val;
2769 break;
2770 case 4:
2771 *c->src.ptr = (u32) c->dst.val;
2772 break; /* 64b reg: zero-extend */
2773 case 8:
2774 *c->src.ptr = c->dst.val;
2775 break;
2776 }
2777 /*
2778 * Write back the memory destination with implicit LOCK
2779 * prefix.
2780 */
2781 c->dst.val = c->src.val;
2782 c->lock_prefix = 1;
2783 break;
2784 case 0x88 ... 0x8b: /* mov */
2785 goto mov;
2786 case 0x8c: /* mov r/m, sreg */
2787 if (c->modrm_reg > VCPU_SREG_GS) {
2788 emulate_ud(ctxt);
2789 goto done;
2791 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
2792 break;
2793 case 0x8d: /* lea r16/r32, m */
2794 c->dst.val = c->modrm_ea;
2795 break;
2796 case 0x8e: { /* mov seg, r/m16 */
2797 uint16_t sel;
2799 sel = c->src.val;
2801 if (c->modrm_reg == VCPU_SREG_CS ||
2802 c->modrm_reg > VCPU_SREG_GS) {
2803 emulate_ud(ctxt);
2804 goto done;
2807 if (c->modrm_reg == VCPU_SREG_SS)
2808 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2810 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
2812 c->dst.type = OP_NONE; /* Disable writeback. */
2813 break;
2815 case 0x8f: /* pop (sole member of Grp1a) */
2816 rc = emulate_grp1a(ctxt, ops);
2817 if (rc != X86EMUL_CONTINUE)
2818 goto done;
2819 break;
2820 case 0x90: /* nop / xchg r8,rax */
2821 if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
2822 c->dst.type = OP_NONE; /* nop */
2823 break;
2825 case 0x91 ... 0x97: /* xchg reg,rax */
2826 c->src.type = OP_REG;
2827 c->src.bytes = c->op_bytes;
2828 c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
2829 c->src.val = *(c->src.ptr);
2830 goto xchg;
2831 case 0x9c: /* pushf */
2832 c->src.val = (unsigned long) ctxt->eflags;
2833 emulate_push(ctxt, ops);
2834 break;
2835 case 0x9d: /* popf */
2836 c->dst.type = OP_REG;
2837 c->dst.ptr = (unsigned long *) &ctxt->eflags;
2838 c->dst.bytes = c->op_bytes;
2839 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
2840 if (rc != X86EMUL_CONTINUE)
2841 goto done;
2842 break;
2843 case 0xa0 ... 0xa1: /* mov */
2844 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
2845 c->dst.val = c->src.val;
2846 break;
2847 case 0xa2 ... 0xa3: /* mov */
2848 c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
2849 break;
2850 case 0xa4 ... 0xa5: /* movs */
2851 goto mov;
2852 case 0xa6 ... 0xa7: /* cmps */
2853 c->dst.type = OP_NONE; /* Disable writeback. */
2854 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
2855 goto cmp;
2856 case 0xa8 ... 0xa9: /* test ax, imm */
2857 goto test;
2858 case 0xaa ... 0xab: /* stos */
2859 c->dst.val = c->regs[VCPU_REGS_RAX];
2860 break;
2861 case 0xac ... 0xad: /* lods */
2862 goto mov;
2863 case 0xae ... 0xaf: /* scas */
2864 DPRINTF("Urk! I don't handle SCAS.\n");
2865 goto cannot_emulate;
2866 case 0xb0 ... 0xbf: /* mov r, imm */
2867 goto mov;
2868 case 0xc0 ... 0xc1:
2869 emulate_grp2(ctxt);
2870 break;
2871 case 0xc3: /* ret */
2872 c->dst.type = OP_REG;
2873 c->dst.ptr = &c->eip;
2874 c->dst.bytes = c->op_bytes;
2875 goto pop_instruction;
2876 case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
2877 mov:
2878 c->dst.val = c->src.val;
2879 break;
2880 case 0xcb: /* ret far */
2881 rc = emulate_ret_far(ctxt, ops);
2882 if (rc != X86EMUL_CONTINUE)
2883 goto done;
2884 break;
2885 case 0xd0 ... 0xd1: /* Grp2 */
2886 c->src.val = 1;
2887 emulate_grp2(ctxt);
2888 break;
2889 case 0xd2 ... 0xd3: /* Grp2 */
2890 c->src.val = c->regs[VCPU_REGS_RCX];
2891 emulate_grp2(ctxt);
2892 break;
2893 case 0xe4: /* inb */
2894 case 0xe5: /* in */
2895 goto do_io_in;
2896 case 0xe6: /* outb */
2897 case 0xe7: /* out */
2898 goto do_io_out;
2899 case 0xe8: /* call (near) */ {
2900 long int rel = c->src.val;
2901 c->src.val = (unsigned long) c->eip;
2902 jmp_rel(c, rel);
2903 emulate_push(ctxt, ops);
2904 break;
2906 case 0xe9: /* jmp rel */
2907 goto jmp;
2908 case 0xea: { /* jmp far */
2909 unsigned short sel;
2910 jump_far:
2911 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2913 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
2914 goto done;
2916 c->eip = 0;
2917 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2918 break;
2920 case 0xeb:
2921 jmp: /* jmp rel short */
2922 jmp_rel(c, c->src.val);
2923 c->dst.type = OP_NONE; /* Disable writeback. */
2924 break;
2925 case 0xec: /* in al,dx */
2926 case 0xed: /* in (e/r)ax,dx */
2927 c->src.val = c->regs[VCPU_REGS_RDX];
2928 do_io_in:
2929 c->dst.bytes = min(c->dst.bytes, 4u);
2930 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
2931 emulate_gp(ctxt, 0);
2932 goto done;
2934 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
2935 &c->dst.val))
2936 goto done; /* IO is needed */
2937 break;
2938 case 0xee: /* out al,dx */
2939 case 0xef: /* out (e/r)ax,dx */
2940 c->src.val = c->regs[VCPU_REGS_RDX];
2941 do_io_out:
2942 c->dst.bytes = min(c->dst.bytes, 4u);
2943 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
2944 emulate_gp(ctxt, 0);
2945 goto done;
2947 ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
2948 ctxt->vcpu);
2949 c->dst.type = OP_NONE; /* Disable writeback. */
2950 break;
2951 case 0xf4: /* hlt */
2952 ctxt->vcpu->arch.halt_request = 1;
2953 break;
2954 case 0xf5: /* cmc */
2955 /* complement carry flag from eflags reg */
2956 ctxt->eflags ^= EFLG_CF;
2957 c->dst.type = OP_NONE; /* Disable writeback. */
2958 break;
2959 case 0xf6 ... 0xf7: /* Grp3 */
2960 if (!emulate_grp3(ctxt, ops))
2961 goto cannot_emulate;
2962 break;
2963 case 0xf8: /* clc */
2964 ctxt->eflags &= ~EFLG_CF;
2965 c->dst.type = OP_NONE; /* Disable writeback. */
2966 break;
2967 case 0xfa: /* cli */
2968 if (emulator_bad_iopl(ctxt, ops))
2969 emulate_gp(ctxt, 0);
2970 else {
2971 ctxt->eflags &= ~X86_EFLAGS_IF;
2972 c->dst.type = OP_NONE; /* Disable writeback. */
2974 break;
2975 case 0xfb: /* sti */
2976 if (emulator_bad_iopl(ctxt, ops))
2977 emulate_gp(ctxt, 0);
2978 else {
2979 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2980 ctxt->eflags |= X86_EFLAGS_IF;
2981 c->dst.type = OP_NONE; /* Disable writeback. */
2983 break;
2984 case 0xfc: /* cld */
2985 ctxt->eflags &= ~EFLG_DF;
2986 c->dst.type = OP_NONE; /* Disable writeback. */
2987 break;
2988 case 0xfd: /* std */
2989 ctxt->eflags |= EFLG_DF;
2990 c->dst.type = OP_NONE; /* Disable writeback. */
2991 break;
2992 case 0xfe: /* Grp4 */
2993 grp45:
2994 rc = emulate_grp45(ctxt, ops);
2995 if (rc != X86EMUL_CONTINUE)
2996 goto done;
2997 break;
2998 case 0xff: /* Grp5 */
2999 if (c->modrm_reg == 5)
3000 goto jump_far;
3001 goto grp45;
3004 writeback:
3005 rc = writeback(ctxt, ops);
3006 if (rc != X86EMUL_CONTINUE)
3007 goto done;
3009 /*
3010 * restore dst type in case the decoding will be reused
3011 * (happens for string instructions)
3012 */
3013 c->dst.type = saved_dst_type;
3015 if ((c->d & SrcMask) == SrcSI)
3016 string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
3017 VCPU_REGS_RSI, &c->src);
3019 if ((c->d & DstMask) == DstDI)
3020 string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
3021 &c->dst);
3023 if (c->rep_prefix && (c->d & String)) {
3024 struct read_cache *rc = &ctxt->decode.io_read;
3025 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3026 /*
3027 * Re-enter the guest when the pio read-ahead buffer is empty
3028 * or, if it is not used, after every 1024 iterations.
3029 */
3030 if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
3031 (rc->end != 0 && rc->end == rc->pos))
3032 ctxt->restart = false;
3033 }
3034 /*
3035 * reset the read cache here in case the string instruction is
3036 * restarted without decoding
3037 */
3038 ctxt->decode.mem_read.end = 0;
3039 ctxt->eip = c->eip;
3041 done:
3042 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
3044 twobyte_insn:
3045 switch (c->b) {
3046 case 0x01: /* lgdt, lidt, lmsw */
3047 switch (c->modrm_reg) {
3048 u16 size;
3049 unsigned long address;
3051 case 0: /* vmcall */
3052 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3053 goto cannot_emulate;
3055 rc = kvm_fix_hypercall(ctxt->vcpu);
3056 if (rc != X86EMUL_CONTINUE)
3057 goto done;
3059 /* Let the processor re-execute the fixed hypercall */
3060 c->eip = ctxt->eip;
3061 /* Disable writeback. */
3062 c->dst.type = OP_NONE;
3063 break;
3064 case 2: /* lgdt */
3065 rc = read_descriptor(ctxt, ops, c->src.ptr,
3066 &size, &address, c->op_bytes);
3067 if (rc != X86EMUL_CONTINUE)
3068 goto done;
3069 realmode_lgdt(ctxt->vcpu, size, address);
3070 /* Disable writeback. */
3071 c->dst.type = OP_NONE;
3072 break;
3073 case 3: /* lidt/vmmcall */
3074 if (c->modrm_mod == 3) {
3075 switch (c->modrm_rm) {
3076 case 1:
3077 rc = kvm_fix_hypercall(ctxt->vcpu);
3078 if (rc != X86EMUL_CONTINUE)
3079 goto done;
3080 break;
3081 default:
3082 goto cannot_emulate;
3084 } else {
3085 rc = read_descriptor(ctxt, ops, c->src.ptr,
3086 &size, &address,
3087 c->op_bytes);
3088 if (rc != X86EMUL_CONTINUE)
3089 goto done;
3090 realmode_lidt(ctxt->vcpu, size, address);
3092 /* Disable writeback. */
3093 c->dst.type = OP_NONE;
3094 break;
3095 case 4: /* smsw */
3096 c->dst.bytes = 2;
3097 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3098 break;
3099 case 6: /* lmsw */
3100 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
3101 (c->src.val & 0x0f), ctxt->vcpu);
3102 c->dst.type = OP_NONE;
3103 break;
3104 case 5: /* not defined */
3105 emulate_ud(ctxt);
3106 goto done;
3107 case 7: /* invlpg*/
3108 emulate_invlpg(ctxt->vcpu, c->modrm_ea);
3109 /* Disable writeback. */
3110 c->dst.type = OP_NONE;
3111 break;
3112 default:
3113 goto cannot_emulate;
3115 break;
3116 case 0x05: /* syscall */
3117 rc = emulate_syscall(ctxt, ops);
3118 if (rc != X86EMUL_CONTINUE)
3119 goto done;
3120 else
3121 goto writeback;
3122 break;
3123 case 0x06:
3124 emulate_clts(ctxt->vcpu);
3125 c->dst.type = OP_NONE;
3126 break;
3127 case 0x08: /* invd */
3128 case 0x09: /* wbinvd */
3129 case 0x0d: /* GrpP (prefetch) */
3130 case 0x18: /* Grp16 (prefetch/nop) */
3131 c->dst.type = OP_NONE;
3132 break;
3133 case 0x20: /* mov cr, reg */
3134 switch (c->modrm_reg) {
3135 case 1:
3136 case 5 ... 7:
3137 case 9 ... 15:
3138 emulate_ud(ctxt);
3139 goto done;
3141 c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3142 c->dst.type = OP_NONE; /* no writeback */
3143 break;
3144 case 0x21: /* mov from dr to reg */
3145 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3146 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3147 emulate_ud(ctxt);
3148 goto done;
3150 ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
3151 c->dst.type = OP_NONE; /* no writeback */
3152 break;
3153 case 0x22: /* mov reg, cr */
3154 if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
3155 emulate_gp(ctxt, 0);
3156 goto done;
3158 c->dst.type = OP_NONE;
3159 break;
3160 case 0x23: /* mov from reg to dr */
3161 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3162 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3163 emulate_ud(ctxt);
3164 goto done;
3167 if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
3168 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3169 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3170 /* #UD condition is already handled by the code above */
3171 emulate_gp(ctxt, 0);
3172 goto done;
3175 c->dst.type = OP_NONE; /* no writeback */
3176 break;
3177 case 0x30:
3178 /* wrmsr */
3179 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3180 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3181 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3182 emulate_gp(ctxt, 0);
3183 goto done;
3185 rc = X86EMUL_CONTINUE;
3186 c->dst.type = OP_NONE;
3187 break;
3188 case 0x32:
3189 /* rdmsr */
3190 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3191 emulate_gp(ctxt, 0);
3192 goto done;
3193 } else {
3194 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3195 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3197 rc = X86EMUL_CONTINUE;
3198 c->dst.type = OP_NONE;
3199 break;
3200 case 0x34: /* sysenter */
3201 rc = emulate_sysenter(ctxt, ops);
3202 if (rc != X86EMUL_CONTINUE)
3203 goto done;
3204 else
3205 goto writeback;
3206 break;
3207 case 0x35: /* sysexit */
3208 rc = emulate_sysexit(ctxt, ops);
3209 if (rc != X86EMUL_CONTINUE)
3210 goto done;
3211 else
3212 goto writeback;
3213 break;
3214 case 0x40 ... 0x4f: /* cmov */
3215 c->dst.val = c->dst.orig_val = c->src.val;
3216 if (!test_cc(c->b, ctxt->eflags))
3217 c->dst.type = OP_NONE; /* no writeback */
3218 break;
3219 case 0x80 ... 0x8f: /* jnz rel, etc*/
3220 if (test_cc(c->b, ctxt->eflags))
3221 jmp_rel(c, c->src.val);
3222 c->dst.type = OP_NONE;
3223 break;
3224 case 0xa0: /* push fs */
3225 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3226 break;
3227 case 0xa1: /* pop fs */
3228 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3229 if (rc != X86EMUL_CONTINUE)
3230 goto done;
3231 break;
3232 case 0xa3:
3233 bt: /* bt */
3234 c->dst.type = OP_NONE;
3235 /* only subword offset */
3236 c->src.val &= (c->dst.bytes << 3) - 1;
3237 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3238 break;
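	/*
	 * Note on the masking above: the bit offset is reduced modulo
	 * the operand width in bits, e.g. for a 4-byte operand
	 * (4 << 3) - 1 = 31, so "bt $35, %eax" ends up testing bit 3.
	 * The bts, btr and btc cases below reuse the same trick.
	 */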
3239 case 0xa4: /* shld imm8, r, r/m */
3240 case 0xa5: /* shld cl, r, r/m */
3241 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3242 break;
3243 case 0xa8: /* push gs */
3244 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3245 break;
3246 case 0xa9: /* pop gs */
3247 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3248 if (rc != X86EMUL_CONTINUE)
3249 goto done;
3250 break;
3251 case 0xab:
3252 bts: /* bts */
3253 /* only subword offset */
3254 c->src.val &= (c->dst.bytes << 3) - 1;
3255 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3256 break;
3257 case 0xac: /* shrd imm8, r, r/m */
3258 case 0xad: /* shrd cl, r, r/m */
3259 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3260 break;
3261 case 0xae: /* clflush */
3262 break;
3263 case 0xb0 ... 0xb1: /* cmpxchg */
3264 /*
3265 * Save real source value, then compare EAX against
3266 * destination.
3267 */
3268 c->src.orig_val = c->src.val;
3269 c->src.val = c->regs[VCPU_REGS_RAX];
3270 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3271 if (ctxt->eflags & EFLG_ZF) {
3272 /* Success: write back to memory. */
3273 c->dst.val = c->src.orig_val;
3274 } else {
3275 /* Failure: write the value we saw to EAX. */
3276 c->dst.type = OP_REG;
3277 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3279 break;
3280 case 0xb3:
3281 btr: /* btr */
3282 /* only subword offset */
3283 c->src.val &= (c->dst.bytes << 3) - 1;
3284 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3285 break;
3286 case 0xb6 ... 0xb7: /* movzx */
3287 c->dst.bytes = c->op_bytes;
3288 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3289 : (u16) c->src.val;
3290 break;
3291 case 0xba: /* Grp8 */
3292 switch (c->modrm_reg & 3) {
3293 case 0:
3294 goto bt;
3295 case 1:
3296 goto bts;
3297 case 2:
3298 goto btr;
3299 case 3:
3300 goto btc;
3302 break;
3303 case 0xbb:
3304 btc: /* btc */
3305 /* only subword offset */
3306 c->src.val &= (c->dst.bytes << 3) - 1;
3307 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3308 break;
3309 case 0xbe ... 0xbf: /* movsx */
3310 c->dst.bytes = c->op_bytes;
3311 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3312 (s16) c->src.val;
3313 break;
3314 case 0xc3: /* movnti */
3315 c->dst.bytes = c->op_bytes;
3316 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3317 (u64) c->src.val;
3318 break;
3319 case 0xc7: /* Grp9 (cmpxchg8b) */
3320 rc = emulate_grp9(ctxt, ops);
3321 if (rc != X86EMUL_CONTINUE)
3322 goto done;
3323 break;
3325 goto writeback;
3327 cannot_emulate:
3328 DPRINTF("Cannot emulate %02x\n", c->b);
3329 return -1;