/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define GroupMask   0xff	/* Group number stored in bits 0:7 */
/* Misc flags */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
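
/*
 * Illustrative reading of these flags: opcode 0x00 (ADD r/m8, r8) appears
 * in the table below as ByteOp | DstMem | SrcReg | ModRM | Lock, i.e. an
 * 8-bit operation whose destination is selected by the ModRM r/m field,
 * whose source is the ModRM reg field, and which may carry a LOCK prefix.
 */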
enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};
static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	ImplicitOps | SrcMem | ModRM, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImmFAddr | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
	/* 0xA8 - 0xAF */
	DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm,
	ByteOp | DstDI | Mov | String, DstDI | Mov | String,
	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
	ByteOp | DstDI | String, DstDI | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};
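
/*
 * Example lookup: the decoder below sets c->d = opcode_table[c->b], so a
 * MOV r/m,r (opcode 0x89) yields DstMem | SrcReg | ModRM | Mov and the
 * generic operand decode does the rest.
 */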
static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */	\
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)					\
	/* _sav |= EFLAGS & _msk; */					\
	"pushf; "							\
	"pop  %"_LO32 _tmp"; "						\
	"andl %"_msk",%"_LO32 _tmp"; "					\
	"orl  %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
		case 1:							\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
			break;						\
		default:						\
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
	do {								\
		unsigned long _tmp;					\
		_type _clv = (_cl).val;					\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
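
/*
 * Typical use, as in the group handlers below: emulate_1op("inc", c->dst,
 * ctxt->eflags) expands to an inline-asm incb/incw/incl/incq on
 * c->dst.val, selected by c->dst.bytes, with the resulting arithmetic
 * flags folded back into ctxt->eflags.
 */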
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})
#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
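
/*
 * Both fetch macros rely on an "rc" variable and a "done" label in the
 * enclosing function: a failed fetch jumps straight to the function's
 * common exit path.
 */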
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
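
/*
 * E.g. ad_bytes == 2 gives 0xffff and ad_bytes == 4 gives 0xffffffff:
 * the mask of address bits that are significant in the current
 * addressing mode.
 */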
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}
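
/*
 * In 64-bit mode the CS/DS/ES/SS bases are architecturally treated as
 * zero; only FS and GS keep a meaningful base, which is why they are the
 * only segments looked up above.
 */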
static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}
static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}

static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
}

static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}

static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       u32 err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}

static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
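
/*
 * Example: with highbyte_regs set, modrm_reg == 7 (BH) resolves to byte 1
 * of the RBX slot (7 & 3 == 3), matching the legacy encoding that aliases
 * AH/CH/DH/BH onto the high byte of AX/CX/DX/BX.
 */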
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0:	/* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1:	/* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2:	/* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}

static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}
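
/*
 * Worked example: the ModRM byte 0x44 (mod=01, reg=000, rm=100) makes the
 * decoder above consume a SIB byte plus an 8-bit displacement, producing
 * an effective address of base + (index << scale) + disp8.
 */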
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group;

	/* we cannot decode insn before we complete previous rep insn */
	WARN_ON(ctxt->restart);

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
			c->d = group2_table[group];
		else
			c->d = group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, ops, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;

	if (c->rip_relative)
		c->modrm_ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.val = 0;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)
			register_address(c, seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		c->src.type = OP_MEM;
		c->src.ptr = (unsigned long *)c->modrm_ea;
		c->src.bytes = c->op_bytes + 2;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
	case DstMem64:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		c->dst.ptr = (unsigned long *)c->modrm_ea;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.val = 0;
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
				     (c->src.val & mask) / 8;
		}
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
		case 1:
			c->dst.val = *(u8 *)c->dst.ptr;
			break;
		case 2:
			c->dst.val = *(u16 *)c->dst.ptr;
			break;
		case 4:
			c->dst.val = *(u32 *)c->dst.ptr;
			break;
		case 8:
			c->dst.val = *(u64 *)c->dst.ptr;
			break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
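
/*
 * Note the return convention: x86_decode_insn() reports 0 on success and
 * -1 for an undecodable instruction, while the helpers above and below
 * use the X86EMUL_* codes internally.
 */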
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
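
/*
 * The read-ahead above turns a "rep ins" into a single host PIO request:
 * up to min(rep count, buffer space, bytes left in the page) units are
 * fetched at once and then drained from rc->data one iteration at a time.
 */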
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
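
/*
 * With the granularity bit set, the 20-bit limit counts 4K pages: e.g. a
 * raw limit of 0xfffff scales to 0xffffffff, a 4GB segment.
 */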
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR,
						ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
					       c->regs[VCPU_REGS_RSP]);
}
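
/*
 * emulate_push() only stages the store: it pre-decrements RSP and points
 * c->dst at the new top of stack; the actual memory write is performed
 * later by writeback().
 */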
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
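
/*
 * POPF is privilege-sensitive: e.g. at CPL 3 with IOPL 0 the IF bit is
 * silently preserved rather than taken from the popped value, which is
 * exactly what the change_mask computation above implements.
 */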
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}

static void emulate_pusha(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);
		++reg;
	}
}
static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
						   c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
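
/*
 * Architectural quirk preserved above: POPA discards the value popped for
 * SP and merely steps RSP past that stack slot.
 */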
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {

		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			     (u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
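
/*
 * This is cmpxchg8b: if EDX:EAX matches the 64-bit destination, ECX:EBX
 * is stored and ZF is set; otherwise the old value is returned in
 * EDX:EAX and ZF is cleared.
 */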
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt,
				   (unsigned long)c->dst.ptr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
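
/*
 * A LOCK-prefixed instruction is written back via cmpxchg_emulated() so
 * the read-modify-write appears atomic to other vcpus; plain stores go
 * through write_emulated().
 */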
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
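
/*
 * Example: for "in %dx,%al" with DX = 0x3f8, the word at TSS offset 102
 * locates the I/O permission bitmap, and bit (0x3f8 & 7) of the bitmap
 * byte at io_bitmap_ptr + 0x3f8/8 must be clear for access to be granted.
 */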
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;
	return true;
}
2138 static void save_state_to_tss16(struct x86_emulate_ctxt
*ctxt
,
2139 struct x86_emulate_ops
*ops
,
2140 struct tss_segment_16
*tss
)
2142 struct decode_cache
*c
= &ctxt
->decode
;
2145 tss
->flag
= ctxt
->eflags
;
2146 tss
->ax
= c
->regs
[VCPU_REGS_RAX
];
2147 tss
->cx
= c
->regs
[VCPU_REGS_RCX
];
2148 tss
->dx
= c
->regs
[VCPU_REGS_RDX
];
2149 tss
->bx
= c
->regs
[VCPU_REGS_RBX
];
2150 tss
->sp
= c
->regs
[VCPU_REGS_RSP
];
2151 tss
->bp
= c
->regs
[VCPU_REGS_RBP
];
2152 tss
->si
= c
->regs
[VCPU_REGS_RSI
];
2153 tss
->di
= c
->regs
[VCPU_REGS_RDI
];
2155 tss
->es
= ops
->get_segment_selector(VCPU_SREG_ES
, ctxt
->vcpu
);
2156 tss
->cs
= ops
->get_segment_selector(VCPU_SREG_CS
, ctxt
->vcpu
);
2157 tss
->ss
= ops
->get_segment_selector(VCPU_SREG_SS
, ctxt
->vcpu
);
2158 tss
->ds
= ops
->get_segment_selector(VCPU_SREG_DS
, ctxt
->vcpu
);
2159 tss
->ldt
= ops
->get_segment_selector(VCPU_SREG_LDTR
, ctxt
->vcpu
);
2162 static int load_state_from_tss16(struct x86_emulate_ctxt
*ctxt
,
2163 struct x86_emulate_ops
*ops
,
2164 struct tss_segment_16
*tss
)
2166 struct decode_cache
*c
= &ctxt
->decode
;
2170 ctxt
->eflags
= tss
->flag
| 2;
2171 c
->regs
[VCPU_REGS_RAX
] = tss
->ax
;
2172 c
->regs
[VCPU_REGS_RCX
] = tss
->cx
;
2173 c
->regs
[VCPU_REGS_RDX
] = tss
->dx
;
2174 c
->regs
[VCPU_REGS_RBX
] = tss
->bx
;
2175 c
->regs
[VCPU_REGS_RSP
] = tss
->sp
;
2176 c
->regs
[VCPU_REGS_RBP
] = tss
->bp
;
2177 c
->regs
[VCPU_REGS_RSI
] = tss
->si
;
2178 c
->regs
[VCPU_REGS_RDI
] = tss
->di
;
2181 * SDM says that segment selectors are loaded before segment
2184 ops
->set_segment_selector(tss
->ldt
, VCPU_SREG_LDTR
, ctxt
->vcpu
);
2185 ops
->set_segment_selector(tss
->es
, VCPU_SREG_ES
, ctxt
->vcpu
);
2186 ops
->set_segment_selector(tss
->cs
, VCPU_SREG_CS
, ctxt
->vcpu
);
2187 ops
->set_segment_selector(tss
->ss
, VCPU_SREG_SS
, ctxt
->vcpu
);
2188 ops
->set_segment_selector(tss
->ds
, VCPU_SREG_DS
, ctxt
->vcpu
);
2191 * Now load segment descriptors. If fault happenes at this stage
2192 * it is handled in a context of new task
2194 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ldt
, VCPU_SREG_LDTR
);
2195 if (ret
!= X86EMUL_CONTINUE
)
2197 ret
= load_segment_descriptor(ctxt
, ops
, tss
->es
, VCPU_SREG_ES
);
2198 if (ret
!= X86EMUL_CONTINUE
)
2200 ret
= load_segment_descriptor(ctxt
, ops
, tss
->cs
, VCPU_SREG_CS
);
2201 if (ret
!= X86EMUL_CONTINUE
)
2203 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ss
, VCPU_SREG_SS
);
2204 if (ret
!= X86EMUL_CONTINUE
)
2206 ret
= load_segment_descriptor(ctxt
, ops
, tss
->ds
, VCPU_SREG_DS
);
2207 if (ret
!= X86EMUL_CONTINUE
)
2210 return X86EMUL_CONTINUE
;
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
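/*
 * The outgoing selector is recorded in the new TSS's prev_task_link only
 * when old_tss_sel != 0xffff; the caller (emulator_do_task_switch below)
 * passes 0xffff for every switch reason except CALL and task gate, since
 * only those create a nested task that a later IRET must return to. The
 * tss_segment_16 layout mirrors the hardware-defined 16-bit TSS format
 * from the SDM.
 */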
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
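/*
 * Summary of how the switch reason is treated above:
 *   - IRET: NT is cleared and the outgoing TSS's busy bit is cleared.
 *   - JMP:  the outgoing busy bit is cleared and no back link is written.
 *   - CALL and task gate: NT is set, the back link is written, and the
 *     outgoing task stays marked busy so it can be resumed by IRET.
 * In every non-IRET case the incoming descriptor's busy bit is set, and
 * CR0.TS is raised unconditionally so the next FPU use can be trapped.
 */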
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
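/*
 * For context, a minimal sketch of the expected caller on the kvm side
 * (x86.c), with the surrounding register and context setup elided; the
 * helper names here are illustrative, not authoritative:
 *
 *	int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector,
 *			    int reason, bool has_error_code, u32 error_code)
 *	{
 *		init_emulate_ctxt(vcpu);   (sync vcpu regs into the ctxt)
 *		return emulator_task_switch(&vcpu->arch.emulate_ctxt,
 *					    &emulate_ops, tss_selector,
 *					    reason, has_error_code,
 *					    error_code);
 *	}
 *
 * The return convention is 0 on success and -1 if the switch could not
 * be emulated.
 */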
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
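/*
 * Example of string_addr_inc() in action: for a 32-bit "rep movsd" with
 * EFLAGS.DF clear, op->bytes == 4, so each iteration bumps RSI/RDI by +4
 * and recomputes op->ptr from the segment base; with DF set the step is
 * -4, walking the buffers backwards.
 */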
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
		string_done:
			ctxt->restart = false;
			ctxt->eip = c->eip;
			goto done;
		}
		/* The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
		 * corresponding termination condition according to:
		 *	- if REPE/REPZ and ZF = 0 then done
		 *	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
		    (c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == 0))
				goto string_done;
			if ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))
				goto string_done;
		}
		c->eip = ctxt->eip;
	}
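	/*
	 * Example of the termination tests above: "repe cmpsb" (opcode
	 * 0xa6 with a REPE prefix) stops either when *CX counts down to
	 * zero or as soon as a byte pair differs and ZF is cleared;
	 * "repne scasb" (0xae) is the mirror image, stopping when ZF
	 * becomes 1 on a match.
	 */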
	if (c->src.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val = c->src.val;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
2618 emulate_2op_SrcV("add", c
->src
, c
->dst
, ctxt
->eflags
);
2620 case 0x06: /* push es */
2621 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2623 case 0x07: /* pop es */
2624 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_ES
);
2625 if (rc
!= X86EMUL_CONTINUE
)
2630 emulate_2op_SrcV("or", c
->src
, c
->dst
, ctxt
->eflags
);
2632 case 0x0e: /* push cs */
2633 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_CS
);
2637 emulate_2op_SrcV("adc", c
->src
, c
->dst
, ctxt
->eflags
);
2639 case 0x16: /* push ss */
2640 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2642 case 0x17: /* pop ss */
2643 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_SS
);
2644 if (rc
!= X86EMUL_CONTINUE
)
2649 emulate_2op_SrcV("sbb", c
->src
, c
->dst
, ctxt
->eflags
);
2651 case 0x1e: /* push ds */
2652 emulate_push_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2654 case 0x1f: /* pop ds */
2655 rc
= emulate_pop_sreg(ctxt
, ops
, VCPU_SREG_DS
);
2656 if (rc
!= X86EMUL_CONTINUE
)
2661 emulate_2op_SrcV("and", c
->src
, c
->dst
, ctxt
->eflags
);
2665 emulate_2op_SrcV("sub", c
->src
, c
->dst
, ctxt
->eflags
);
2669 emulate_2op_SrcV("xor", c
->src
, c
->dst
, ctxt
->eflags
);
2673 emulate_2op_SrcV("cmp", c
->src
, c
->dst
, ctxt
->eflags
);
2675 case 0x40 ... 0x47: /* inc r16/r32 */
2676 emulate_1op("inc", c
->dst
, ctxt
->eflags
);
2678 case 0x48 ... 0x4f: /* dec r16/r32 */
2679 emulate_1op("dec", c
->dst
, ctxt
->eflags
);
2681 case 0x50 ... 0x57: /* push reg */
2682 emulate_push(ctxt
, ops
);
2684 case 0x58 ... 0x5f: /* pop reg */
2686 rc
= emulate_pop(ctxt
, ops
, &c
->dst
.val
, c
->op_bytes
);
2687 if (rc
!= X86EMUL_CONTINUE
)
2690 case 0x60: /* pusha */
2691 emulate_pusha(ctxt
, ops
);
2693 case 0x61: /* popa */
2694 rc
= emulate_popa(ctxt
, ops
);
2695 if (rc
!= X86EMUL_CONTINUE
)
2698 case 0x63: /* movsxd */
2699 if (ctxt
->mode
!= X86EMUL_MODE_PROT64
)
2700 goto cannot_emulate
;
2701 c
->dst
.val
= (s32
) c
->src
.val
;
2703 case 0x68: /* push imm */
2704 case 0x6a: /* push imm8 */
2705 emulate_push(ctxt
, ops
);
2707 case 0x6c: /* insb */
2708 case 0x6d: /* insw/insd */
2709 c
->dst
.bytes
= min(c
->dst
.bytes
, 4u);
2710 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2712 emulate_gp(ctxt
, 0);
2715 if (!pio_in_emulated(ctxt
, ops
, c
->dst
.bytes
,
2716 c
->regs
[VCPU_REGS_RDX
], &c
->dst
.val
))
2717 goto done
; /* IO is needed, skip writeback */
2719 case 0x6e: /* outsb */
2720 case 0x6f: /* outsw/outsd */
2721 c
->src
.bytes
= min(c
->src
.bytes
, 4u);
2722 if (!emulator_io_permited(ctxt
, ops
, c
->regs
[VCPU_REGS_RDX
],
2724 emulate_gp(ctxt
, 0);
2727 ops
->pio_out_emulated(c
->src
.bytes
, c
->regs
[VCPU_REGS_RDX
],
2728 &c
->src
.val
, 1, ctxt
->vcpu
);
2730 c
->dst
.type
= OP_NONE
; /* nothing to writeback */
2732 case 0x70 ... 0x7f: /* jcc (short) */
2733 if (test_cc(c
->b
, ctxt
->eflags
))
2734 jmp_rel(c
, c
->src
.val
);
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
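	/*
	 * Grp1 above is the usual x86 "group" pattern: opcodes 0x80-0x83
	 * share one encoding and bits 3:5 of the ModRM byte select the
	 * actual ALU operation. For example, 0x83 /5 (ModRM.reg == 5) is
	 * "sub r/m, imm8", so the inner switch lands on "goto sub".
	 */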
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) {
			c->dst.type = OP_NONE;  /* nop */
			break;
		}
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = OP_REG;
		c->src.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa1:	/* mov */
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		c->dst.val = c->src.val;
		break;
	case 0xa2 ... 0xa3:	/* mov */
		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
		break;
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xaa ... 0xab:	/* stos */
		c->dst.val = c->regs[VCPU_REGS_RAX];
		break;
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		c->src.val = 1;
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out al,dx */
	case 0xef: /* out (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
				      ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
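	/*
	 * Both the ins/outs cases earlier and the in/out dx cases above
	 * gate the access with emulator_io_permited(); as on real
	 * hardware, when CPL is greater than IOPL the port must also be
	 * allowed by the I/O permission bitmap in the TSS, otherwise the
	 * emulator raises #GP(0).
	 */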
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			emulate_gp(ctxt, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			emulate_gp(ctxt, 0);
		else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/*
		 * Re-enter guest when pio read ahead buffer is empty or,
		 * if it is not used, after each 1024 iterations.
		 */
		if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
		    (rc->end != 0 && rc->end == rc->pos))
			ctxt->restart = false;
	}
	/*
	 * reset read cache here in case string instruction is restarted
	 * without decoding
	 */
	ctxt->decode.mem_read.end = 0;
	ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
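/*
 * The writeback tail above implements the string-instruction restart
 * protocol: while ctxt->restart is true the caller re-invokes the
 * emulator with the same decoded instruction, and the emulator re-enters
 * the guest either when the pio read-ahead buffer drains or, in the
 * non-buffered case, every 1024 iterations (the RCX & 0x3ff test), so a
 * huge "rep ins" cannot run unbounded between interrupt checks.
 */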
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:	/* clts */
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x09:		/* wbinvd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
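	/*
	 * The CR4.DE tests in the 0x21/0x23 cases mirror the
	 * architecture: with debug extensions enabled, DR4 and DR5 are
	 * reserved and any access raises #UD; with CR4.DE clear they
	 * alias DR6 and DR7, so the access is simply passed through to
	 * ops->get_dr()/ops->set_dr().
	 */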
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
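	/*
	 * wrmsr/rdmsr above use the architectural register convention:
	 * ECX holds the MSR index and EDX:EAX the 64-bit value. For
	 * example, with EDX = 0x00000001 and EAX = 0x00000002 the
	 * assembled msr_data is 0x0000000100000002.
	 */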
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	/* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	/* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	      btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
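	/*
	 * The cmpxchg case above reuses the flag-setting "cmp" helper:
	 * for example, with EAX == 5 and a destination holding 5, the
	 * compare sets ZF and the source operand is committed to the
	 * destination; if the destination held 7 instead, ZF stays clear
	 * and 7 is written back to EAX, which is the architectural
	 * cmpxchg behaviour.
	 */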
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);
	return -1;
}