Expose RDRAND and F16C through CPUID even if the host only has AVX.
[valgrind.git] / VEX/priv/guest_amd64_defs.h
/*---------------------------------------------------------------*/
/*--- begin                                 guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_emnote.h"              // VexEmNote
#include "libvex_guest_amd64.h"         // VexGuestAMD64State
#include "guest_generic_bb_to_IR.h"     // DisResult
/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                            ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           const UChar* guest_code,
                           Long         delta,
                           Addr         guest_IP,
                           VexArch      guest_arch,
                           const VexArchInfo* archinfo,
                           const VexAbiInfo*  abiinfo,
                           VexEndness   host_endness,
                           Bool         sigill_diag );
/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int,
                                                   VexRegisterUpdates );

extern
VexGuestLayout amd64guest_layout;
/*---------------------------------------------------------*/
/*--- amd64 guest helpers                                ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_pclmul ( ULong s1, ULong s2, ULong which );

extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );

extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );
/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                                UInt seg_selector, UInt virtual_addr );
extern ULong amd64g_calculate_mmx_pmaddwd ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw  ( ULong, ULong );

extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
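/* For reference: the CRC32 instruction these helpers model uses the CRC-32C
   (Castagnoli) polynomial, bit-reflected, with no final inversion.  A
   minimal bit-serial sketch of the byte-sized step (the w/l/q variants just
   apply it to 2, 4 or 8 successive bytes), offered as an illustration and
   not as the actual implementation:

      static ULong ref_crc32b ( ULong crcIn, ULong b )
      {
         UInt crc = (UInt)crcIn;
         Int  i;
         crc ^= (UInt)(b & 0xFF);                       // fold the byte in
         for (i = 0; i < 8; i++)                        // one bit at a time
            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
         return (ULong)crc;                             // zero-extended result
      }
*/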
extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

extern ULong amd64g_calculate_pext ( ULong, ULong );
extern ULong amd64g_calculate_pdep ( ULong, ULong );
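/* These implement the BMI2 PEXT/PDEP operations.  The declarations above do
   not name their arguments, so the following bit-serial reference sketch
   simply calls them src and mask; it illustrates the operation rather than
   this header's calling convention:

      static ULong ref_pdep ( ULong src, ULong mask )     // scatter
      {
         ULong res = 0, bit = 1, m;
         for (m = mask; m != 0; m &= m - 1) {   // each set bit of mask, low to high
            if (src & bit)
               res |= m & ~(m - 1);             // deposit at that mask position
            bit <<= 1;                          // consume the next source bit
         }
         return res;
      }

      static ULong ref_pext ( ULong src, ULong mask )     // gather
      {
         ULong res = 0, bit = 1, m;
         for (m = mask; m != 0; m &= m - 1) {
            if (src & (m & ~(m - 1)))           // source bit under this mask bit
               res |= bit;                      // pack it into the low bits
            bit <<= 1;
         }
         return res;
      }
*/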
/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( Addr/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( Addr/*addr*/, ULong/*data*/ );

extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st,
                                                     ULong hasF16C,
                                                     ULong hasRDRAND );
extern void  amd64g_dirtyhelper_CPUID_avx2 ( VexGuestAMD64State* st,
                                             ULong hasF16C, ULong hasRDRAND );
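/* The hasF16C and hasRDRAND arguments exist so that the F16C and RDRAND
   CPUID feature bits can be reported whenever the host supports them, even
   when the host baseline is only AVX rather than AVX2.  A rough sketch of
   how a caller might compute them from the VexArchInfo passed to
   disInstr_AMD64, assuming the VEX_HWCAPS_AMD64_F16C and
   VEX_HWCAPS_AMD64_RDRAND bits declared in libvex.h:

      ULong hasF16C   = (archinfo->hwcaps & VEX_HWCAPS_AMD64_F16C)   ? 1 : 0;
      ULong hasRDRAND = (archinfo->hwcaps & VEX_HWCAPS_AMD64_RDRAND) ? 1 : 0;
      // then passed as the trailing arguments of the dirty call that
      // invokes amd64g_dirtyhelper_CPUID_avx_and_cx16 or _avx2
*/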
extern void      amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

extern void      amd64g_dirtyhelper_XSAVE_COMPONENT_0
                    ( VexGuestAMD64State* gst, HWord addr );
extern void      amd64g_dirtyhelper_XSAVE_COMPONENT_1_EXCLUDING_XMMREGS
                    ( VexGuestAMD64State* gst, HWord addr );

extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_0
                    ( VexGuestAMD64State* gst, HWord addr );
extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_1_EXCLUDING_XMMREGS
                    ( VexGuestAMD64State* gst, HWord addr );

extern ULong amd64g_dirtyhelper_RDTSC ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );

extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );
// This returns a 32-bit value from the host's RDRAND in bits 31:0, and the
// resulting C flag value in bit 32.
extern ULong amd64g_dirtyhelper_RDRAND ( void );
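/* For illustration only, a consumer of the packed return value might
   unpack it like this:

      ULong packed = amd64g_dirtyhelper_RDRAND();
      ULong value  = packed & 0xFFFFFFFFULL;   // the 32-bit random value
      ULong cflag  = (packed >> 32) & 1;       // C flag: 1 means "valid"
*/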
/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, no
   accesses of memory, is a pure function.

   opc_and_imm contains (4th byte of opcode << 8) | the-imm8-byte so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  4th byte of opcode is in the range
   0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called with opc_and_imm combinations which have
   actually been validated, and will assert if otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
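/* Given the result layout described above, a caller handling an xSTRI
   variant might unpack the return value roughly as follows (sketch only;
   the variable names are illustrative):

      ULong res      = amd64g_dirtyhelper_PCMPxSTRx(gst, opc4_and_imm,
                                                    gstOffL, gstOffR,
                                                    edxIN, eaxIN);
      ULong newFlags = res & 0xFFFFULL;          // new OSZACP rflags bits
      ULong newECX   = (res >> 16) & 0xFFFFULL;  // new %ecx (xSTRI only)

   For xSTRM variants only newFlags is meaningful, since the data result
   has already been written to the guest XMM0.
*/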
/* Implementation of Intel AES instructions as described in
   Intel Advanced Vector Extensions
     Programming Reference
     MARCH 2008
     319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   opc4 contains the 4th byte of opcode.  Front-end should only
   give opcodes corresponding to AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC
   (will assert otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, gstOffD is the guest state offset for the XMM register
   output.  We never have to deal with the memory case since that is handled
   by pre-loading the relevant value into the fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AES (
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );
/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, no
   accesses of memory, is a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the two XMM
   registers, input and output.  We never have to deal with the memory
   case since that is handled by pre-loading the relevant value into the
   fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST (
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );
//extern void  amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );
/*---------------------------------------------------------*/
/*--- Condition code stuff                               ---*/
/*---------------------------------------------------------*/

/* rflags masks */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
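/* These shifts/masks give the architectural bit positions of the OSZACP
   flags in an rflags-format value, such as the one returned by
   amd64g_calculate_rflags_all().  For example, given a thunk
   (cc_op, cc_dep1, cc_dep2, cc_ndep), a caller might test individual
   flags like this (illustrative sketch only):

      ULong rflags = amd64g_calculate_rflags_all(cc_op, cc_dep1,
                                                 cc_dep2, cc_ndep);
      Bool  zf     = (rflags & AMD64G_CC_MASK_Z) != 0;   // zero flag
      Bool  cf     = (rflags & AMD64G_CC_MASK_C) != 0;   // carry flag
*/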
/* additional rflags masks */
#define AMD64G_CC_SHIFT_ID  21
#define AMD64G_CC_SHIFT_AC  18
#define AMD64G_CC_SHIFT_D   10

#define AMD64G_CC_MASK_ID   (1ULL << AMD64G_CC_SHIFT_ID)
#define AMD64G_CC_MASK_AC   (1ULL << AMD64G_CC_SHIFT_AC)
#define AMD64G_CC_MASK_D    (1ULL << AMD64G_CC_SHIFT_D)

/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3   (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2   (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1   (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0   (1ULL << AMD64G_FC_SHIFT_C0)
/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_eflags, calculate_eflags_c) the
       IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGICB/W/L/Q).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2           NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg     unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry  old_carry

   and/or/xor         result             zero           unused

   inc/dec            result             zero           old_carry

   shl/shr/sar        result             subshifted-    unused
                                         result

   rol/ror            result             zero           old_flags

   copy               old_flags          zero           unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave A Z S P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
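/* To make the scheme concrete: after a 64-bit ADD, the front end records
   CC_OP = AMD64G_CC_OP_ADDQ, CC_DEP1 = argL, CC_DEP2 = argR (CC_NDEP is
   unused for ADD), and the flags are only rebuilt if somebody asks for
   them.  A simplified sketch of the C and Z computations for that one case
   (the actual evaluation functions handle every CC_OP and all six flags):

      ULong res      = cc_dep1 + cc_dep2;                  // redo the addition
      ULong cf       = (res < cc_dep1) ? 1 : 0;            // unsigned carry out
      ULong zf       = (res == 0) ? AMD64G_CC_MASK_Z : 0;  // zero result
      ULong flags_CZ = (cf << AMD64G_CC_SHIFT_C) | zf;
*/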
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_ANDN32,  /* 53 */
    AMD64G_CC_OP_ANDN64,  /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

    AMD64G_CC_OP_BLSI32,  /* 55 */
    AMD64G_CC_OP_BLSI64,  /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSMSK32,/* 57 */
    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSR32,  /* 59 */
    AMD64G_CC_OP_BLSR64,  /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_ADCX32,  /* 61 DEP1 = argL, DEP2 = argR ^ oldCarry, .. */
    AMD64G_CC_OP_ADCX64,  /* 62 .. NDEP = old flags */

    AMD64G_CC_OP_ADOX32,  /* 63 DEP1 = argL, DEP2 = argR ^ oldOverflow, .. */
    AMD64G_CC_OP_ADOX64,  /* 64 .. NDEP = old flags */

    AMD64G_CC_OP_NUMBER
};
typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* less               */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;
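/* The codes are paired so that the low bit inverts the sense (eg
   AMD64CondNZ == AMD64CondZ ^ 1).  A caller asking "does the Z condition
   hold for this thunk?" might write, for instance (sketch only):

      Bool z_set = amd64g_calculate_condition(AMD64CondZ, cc_op,
                                              cc_dep1, cc_dep2, cc_ndep) != 0;
*/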
#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/