disasm/disasm.c
1 /* ----------------------------------------------------------------------- *
2 *
3 * Copyright 1996-2012 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
34 /*
35 * disasm.c where all the _work_ gets done in the Netwide Disassembler
38 #include "compiler.h"
41 #include "nasm.h"
42 #include "disasm.h"
43 #include "sync.h"
44 #include "insns.h"
45 #include "tables.h"
46 #include "regdis.h"
47 #include "disp8.h"
49 #define fetch_safe(_start, _ptr, _size, _need, _op) \
50 do { \
51 if (((_ptr) - (_start)) >= ((_size) - (_need))) \
52 _op; \
53 } while (0)
55 #define fetch_or_return(_start, _ptr, _size, _need) \
56 fetch_safe(_start, _ptr, _size, _need, return 0)
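/*
 * These macros bound-check reads from the caller-supplied code buffer:
 * fetch_safe() executes _op when (_ptr - _start) >= (_size - _need),
 * i.e. when fewer than _need + 1 bytes remain past _ptr, and
 * fetch_or_return() turns that escape into "return 0".  Typical use,
 * as in the prefix scanner in disasm() below:
 *
 *     fetch_or_return(origdata, data, data_size, 1);
 *     prefix.rep = *data++;      (the byte is known to be present)
 */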
59 * Flags that go into the `segment' field of `insn' structures
60 * during disassembly.
62 #define SEG_RELATIVE 1
63 #define SEG_32BIT 2
64 #define SEG_RMREG 4
65 #define SEG_DISP8 8
66 #define SEG_DISP16 16
67 #define SEG_DISP32 32
68 #define SEG_NODISP 64
69 #define SEG_SIGNED 128
70 #define SEG_64BIT 256
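/*
 * These are OR-ed into the `segment' field of each operand as it is
 * decoded; a rel32 branch target, for instance, ends up carrying
 * SEG_RELATIVE|SEG_32BIT (byte-code 070 in matches() below), and the
 * output stage later keys off SEG_DISP8/16/32 to choose how the
 * displacement is printed.
 */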
73 * Prefix information
75 struct prefix_info {
76 uint8_t osize; /* Operand size */
77 uint8_t asize; /* Address size */
78 uint8_t osp; /* Operand size prefix present */
79 uint8_t asp; /* Address size prefix present */
80 uint8_t rep; /* Rep prefix present */
81 uint8_t seg; /* Segment override prefix present */
82 uint8_t wait; /* WAIT "prefix" present */
83 uint8_t lock; /* Lock prefix present */
84 uint8_t vex[3]; /* VEX prefix present */
85 uint8_t vex_c; /* VEX "class" (VEX, XOP, ...) */
86 uint8_t vex_m; /* VEX.M field */
87 uint8_t vex_v;
88 uint8_t vex_lp; /* VEX.LP fields */
89 uint32_t rex; /* REX prefix present */
90 uint8_t evex[3]; /* EVEX prefix present */
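/*
 * A prefix_info is filled in by the prefix-scanning loop at the top of
 * disasm() and then read, unmodified, by matches() for every candidate
 * template.
 */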
93 #define getu8(x) (*(uint8_t *)(x))
94 #if X86_MEMORY
95 /* Little-endian CPU which can handle unaligned references */
96 #define getu16(x) (*(uint16_t *)(x))
97 #define getu32(x) (*(uint32_t *)(x))
98 #define getu64(x) (*(uint64_t *)(x))
99 #else
100 static uint16_t getu16(uint8_t *data)
102 return (uint16_t)data[0] + ((uint16_t)data[1] << 8);
104 static uint32_t getu32(uint8_t *data)
106 return (uint32_t)getu16(data) + ((uint32_t)getu16(data+2) << 16);
108 static uint64_t getu64(uint8_t *data)
110 return (uint64_t)getu32(data) + ((uint64_t)getu32(data+4) << 32);
112 #endif
114 #define gets8(x) ((int8_t)getu8(x))
115 #define gets16(x) ((int16_t)getu16(x))
116 #define gets32(x) ((int32_t)getu32(x))
117 #define gets64(x) ((int64_t)getu64(x))
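/*
 * All fetches are little-endian.  For instance, getu32() applied to the
 * byte sequence 78 56 34 12 yields 0x12345678, and gets8() applied to
 * 0xfe yields -2; the displacement and immediate decoding below relies
 * on exactly this.
 */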
119 /* Important: regval must already have been adjusted for rex extensions */
120 static enum reg_enum whichreg(opflags_t regflags, int regval, int rex)
122 size_t i;
124 static const struct {
125 opflags_t flags;
126 enum reg_enum reg;
127 } specific_registers[] = {
128 {REG_AL, R_AL},
129 {REG_AX, R_AX},
130 {REG_EAX, R_EAX},
131 {REG_RAX, R_RAX},
132 {REG_DL, R_DL},
133 {REG_DX, R_DX},
134 {REG_EDX, R_EDX},
135 {REG_RDX, R_RDX},
136 {REG_CL, R_CL},
137 {REG_CX, R_CX},
138 {REG_ECX, R_ECX},
139 {REG_RCX, R_RCX},
140 {FPU0, R_ST0},
141 {XMM0, R_XMM0},
142 {YMM0, R_YMM0},
143 {ZMM0, R_ZMM0},
144 {REG_ES, R_ES},
145 {REG_CS, R_CS},
146 {REG_SS, R_SS},
147 {REG_DS, R_DS},
148 {REG_FS, R_FS},
149 {REG_GS, R_GS},
150 {OPMASK0, R_K0},
153 if (!(regflags & (REGISTER|REGMEM)))
154 return 0; /* Registers not permissible?! */
156 regflags |= REGISTER;
158 for (i = 0; i < ARRAY_SIZE(specific_registers); i++)
159 if (!(specific_registers[i].flags & ~regflags))
160 return specific_registers[i].reg;
162     /* All the entries below look up regval in a 16-entry array */
163 if (regval < 0 || regval > (rex & REX_EV ? 31 : 15))
164 return 0;
166 #define GET_REGISTER(__array, __index) \
167 ((size_t)(__index) < (size_t)ARRAY_SIZE(__array) ? __array[(__index)] : 0)
169 if (!(REG8 & ~regflags)) {
170 if (rex & (REX_P|REX_NH))
171 return GET_REGISTER(nasm_rd_reg8_rex, regval);
172 else
173 return GET_REGISTER(nasm_rd_reg8, regval);
175 if (!(REG16 & ~regflags))
176 return GET_REGISTER(nasm_rd_reg16, regval);
177 if (!(REG32 & ~regflags))
178 return GET_REGISTER(nasm_rd_reg32, regval);
179 if (!(REG64 & ~regflags))
180 return GET_REGISTER(nasm_rd_reg64, regval);
181 if (!(REG_SREG & ~regflags))
182 return GET_REGISTER(nasm_rd_sreg, regval & 7); /* Ignore REX */
183 if (!(REG_CREG & ~regflags))
184 return GET_REGISTER(nasm_rd_creg, regval);
185 if (!(REG_DREG & ~regflags))
186 return GET_REGISTER(nasm_rd_dreg, regval);
187 if (!(REG_TREG & ~regflags)) {
188 if (regval > 7)
189 return 0; /* TR registers are ill-defined with rex */
190 return GET_REGISTER(nasm_rd_treg, regval);
192 if (!(FPUREG & ~regflags))
193 return GET_REGISTER(nasm_rd_fpureg, regval & 7); /* Ignore REX */
194 if (!(MMXREG & ~regflags))
195 return GET_REGISTER(nasm_rd_mmxreg, regval & 7); /* Ignore REX */
196 if (!(XMMREG & ~regflags))
197 return GET_REGISTER(nasm_rd_xmmreg, regval);
198 if (!(YMMREG & ~regflags))
199 return GET_REGISTER(nasm_rd_ymmreg, regval);
200 if (!(ZMMREG & ~regflags))
201 return GET_REGISTER(nasm_rd_zmmreg, regval);
202 if (!(OPMASKREG & ~regflags))
203 return GET_REGISTER(nasm_rd_opmaskreg, regval);
204 if (!(BNDREG & ~regflags))
205 return GET_REGISTER(nasm_rd_bndreg, regval);
207 #undef GET_REGISTER
208 return 0;
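/*
 * Worked example: whichreg(REG16, 3, 0) matches none of the
 * specific_registers entries (REG16 is a whole class, not a single
 * register), passes the 0..15 range check, and returns
 * nasm_rd_reg16[3], i.e. BX, whose hardware encoding is 3.  A return
 * of 0 means the flags and register number are inconsistent; the
 * operand check in disasm() treats that as a failed match.
 */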
211 static uint32_t append_evex_reg_deco(char *buf, uint32_t num,
212 decoflags_t deco, uint8_t *evex)
214 const char * const er_names[] = {"rn-sae", "rd-sae", "ru-sae", "rz-sae"};
215 uint32_t num_chars = 0;
217 if ((deco & MASK) && (evex[2] & EVEX_P2AAA)) {
218 enum reg_enum opmasknum = nasm_rd_opmaskreg[evex[2] & EVEX_P2AAA];
219 const char * regname = nasm_reg_names[opmasknum - EXPR_REG_START];
221 num_chars += snprintf(buf + num_chars, num - num_chars,
222 "{%s}", regname);
224 if ((deco & Z) && (evex[2] & EVEX_P2Z)) {
225 num_chars += snprintf(buf + num_chars, num - num_chars,
226 "{z}");
230 if (evex[2] & EVEX_P2B) {
231 if (deco & ER) {
232 uint8_t er_type = (evex[2] & EVEX_P2LL) >> 5;
233 num_chars += snprintf(buf + num_chars, num - num_chars,
234 ",{%s}", er_names[er_type]);
235 } else if (deco & SAE) {
236 num_chars += snprintf(buf + num_chars, num - num_chars,
237 ",{sae}");
241 return num_chars;
244 static uint32_t append_evex_mem_deco(char *buf, uint32_t num, opflags_t type,
245 decoflags_t deco, uint8_t *evex)
247 uint32_t num_chars = 0;
249 if ((evex[2] & EVEX_P2B) && (deco & BRDCAST_MASK)) {
250 decoflags_t deco_brsize = deco & BRSIZE_MASK;
251 opflags_t template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
252 uint8_t br_num = (type & SIZE_MASK) / BITS128 *
253 BITS64 / template_opsize * 2;
255 num_chars += snprintf(buf + num_chars, num - num_chars,
256 "{1to%d}", br_num);
259 if ((deco & MASK) && (evex[2] & EVEX_P2AAA)) {
260 enum reg_enum opmasknum = nasm_rd_opmaskreg[evex[2] & EVEX_P2AAA];
261 const char * regname = nasm_reg_names[opmasknum - EXPR_REG_START];
263 num_chars += snprintf(buf + num_chars, num - num_chars,
264 "{%s}", regname);
266 if ((deco & Z) && (evex[2] & EVEX_P2Z)) {
267 num_chars += snprintf(buf + num_chars, num - num_chars,
268 "{z}");
273 return num_chars;
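/*
 * Example outputs: with EVEX.b set on a memory operand, a 512-bit
 * operand broadcast from 32-bit elements is decorated "{1to16}" and
 * one broadcast from 64-bit elements "{1to8}"; any opmask/zeroing
 * decorations then follow in the same way as for register operands,
 * e.g. "{k1}{z}".
 */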
277 * Process an effective address (ModRM) specification.
279 static uint8_t *do_ea(uint8_t *data, int modrm, int asize,
280 int segsize, enum ea_type type,
281 operand *op, insn *ins)
283 int mod, rm, scale, index, base;
284 int rex;
285 uint8_t *evex;
286 uint8_t sib = 0;
287 bool is_evex = !!(ins->rex & REX_EV);
289 mod = (modrm >> 6) & 03;
290 rm = modrm & 07;
292 if (mod != 3 && asize != 16 && rm == 4)
293 sib = *data++;
295 rex = ins->rex;
296 evex = ins->evex_p;
298 if (mod == 3) { /* pure register version */
299 op->basereg = rm+(rex & REX_B ? 8 : 0);
300 op->segment |= SEG_RMREG;
301 if (is_evex && segsize == 64) {
302 op->basereg += (evex[0] & EVEX_P0X ? 0 : 16);
304 return data;
307 op->disp_size = 0;
308 op->eaflags = 0;
310 if (asize == 16) {
312 * <mod> specifies the displacement size (none, byte or
313 * word), and <rm> specifies the register combination.
314 * Exception: mod=0,rm=6 does not specify [BP] as one might
315 * expect, but instead specifies [disp16].
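/*
 * Worked example: modrm 0x42 has mod=1 and rm=2, so the switch below
 * selects base BP with index SI, and the following byte is a signed
 * disp8 - the operand comes out as [bp+si] plus an 8-bit displacement.
 */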
318 if (type != EA_SCALAR)
319 return NULL;
321 op->indexreg = op->basereg = -1;
322 op->scale = 1; /* always, in 16 bits */
323 switch (rm) {
324 case 0:
325 op->basereg = R_BX;
326 op->indexreg = R_SI;
327 break;
328 case 1:
329 op->basereg = R_BX;
330 op->indexreg = R_DI;
331 break;
332 case 2:
333 op->basereg = R_BP;
334 op->indexreg = R_SI;
335 break;
336 case 3:
337 op->basereg = R_BP;
338 op->indexreg = R_DI;
339 break;
340 case 4:
341 op->basereg = R_SI;
342 break;
343 case 5:
344 op->basereg = R_DI;
345 break;
346 case 6:
347 op->basereg = R_BP;
348 break;
349 case 7:
350 op->basereg = R_BX;
351 break;
353 if (rm == 6 && mod == 0) { /* special case */
354 op->basereg = -1;
355 if (segsize != 16)
356 op->disp_size = 16;
357 mod = 2; /* fake disp16 */
359 switch (mod) {
360 case 0:
361 op->segment |= SEG_NODISP;
362 break;
363 case 1:
364 op->segment |= SEG_DISP8;
365 if (ins->evex_tuple != 0) {
366 op->offset = gets8(data) * get_disp8N(ins);
367 } else {
368 op->offset = gets8(data);
370 data++;
371 break;
372 case 2:
373 op->segment |= SEG_DISP16;
374 op->offset = *data++;
375 op->offset |= ((unsigned)*data++) << 8;
376 break;
378 return data;
379 } else {
381 * Once again, <mod> specifies displacement size (this time
382 * none, byte or *dword*), while <rm> specifies the base
383 * register. Again, [EBP] is missing, replaced by a pure
384 * disp32 (this time that's mod=0,rm=*5*) in 32-bit mode,
385 * and RIP-relative addressing in 64-bit mode.
387 * However, rm=4
388 * indicates not a single base register, but instead the
389 * presence of a SIB byte...
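/*
 * Worked example (32-bit addressing): modrm 0x44 has mod=1, rm=4, so a
 * SIB byte follows; sib 0x88 gives scale=2 (factor 4), index=1 (ECX)
 * and base=0 (EAX), and the trailing signed disp8 completes an
 * effective address printed as [eax+ecx*4] plus the displacement.
 */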
391 int a64 = asize == 64;
393 op->indexreg = -1;
395 if (a64)
396 op->basereg = nasm_rd_reg64[rm | ((rex & REX_B) ? 8 : 0)];
397 else
398 op->basereg = nasm_rd_reg32[rm | ((rex & REX_B) ? 8 : 0)];
400 if (rm == 5 && mod == 0) {
401 if (segsize == 64) {
402 op->eaflags |= EAF_REL;
403 op->segment |= SEG_RELATIVE;
406 if (asize != 64)
407 op->disp_size = asize;
409 op->basereg = -1;
410 mod = 2; /* fake disp32 */
414 if (rm == 4) { /* process SIB */
415 uint8_t vsib_hi = 0;
416 scale = (sib >> 6) & 03;
417 index = (sib >> 3) & 07;
418 base = sib & 07;
420 op->scale = 1 << scale;
422 if (segsize == 64) {
423 vsib_hi = (rex & REX_X ? 8 : 0) |
424 (evex[2] & EVEX_P2VP ? 0 : 16);
427 if (type == EA_XMMVSIB)
428 op->indexreg = nasm_rd_xmmreg[index | vsib_hi];
429 else if (type == EA_YMMVSIB)
430 op->indexreg = nasm_rd_ymmreg[index | vsib_hi];
431 else if (type == EA_ZMMVSIB)
432 op->indexreg = nasm_rd_zmmreg[index | vsib_hi];
433 else if (index == 4 && !(rex & REX_X))
434 op->indexreg = -1; /* ESP/RSP cannot be an index */
435 else if (a64)
436 op->indexreg = nasm_rd_reg64[index | ((rex & REX_X) ? 8 : 0)];
437 else
438 op->indexreg = nasm_rd_reg32[index | ((rex & REX_X) ? 8 : 0)];
440 if (base == 5 && mod == 0) {
441 op->basereg = -1;
442 mod = 2; /* Fake disp32 */
443 } else if (a64)
444 op->basereg = nasm_rd_reg64[base | ((rex & REX_B) ? 8 : 0)];
445 else
446 op->basereg = nasm_rd_reg32[base | ((rex & REX_B) ? 8 : 0)];
448 if (segsize == 16)
449 op->disp_size = 32;
450 } else if (type != EA_SCALAR) {
451 /* Can't have VSIB without SIB */
452 return NULL;
455 switch (mod) {
456 case 0:
457 op->segment |= SEG_NODISP;
458 break;
459 case 1:
460 op->segment |= SEG_DISP8;
461 if (ins->evex_tuple != 0) {
462 op->offset = gets8(data) * get_disp8N(ins);
463 } else {
464 op->offset = gets8(data);
466 data++;
467 break;
468 case 2:
469 op->segment |= SEG_DISP32;
470 op->offset = gets32(data);
471 data += 4;
472 break;
474 return data;
479 * Determine whether the instruction template in t corresponds to the data
480 * stream in data. Return the number of bytes matched if so.
482 #define case4(x) case (x): case (x)+1: case (x)+2: case (x)+3
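/*
 * case4(010), for instance, expands to "case 010: case 011: case 012:
 * case 013:" - the low two bits of each such byte-code select which
 * operand it applies to (see the op1/op2 computation below).
 */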
484 static int matches(const struct itemplate *t, uint8_t *data,
485 const struct prefix_info *prefix, int segsize, insn *ins)
487 uint8_t *r = (uint8_t *)(t->code);
488 uint8_t *origdata = data;
489 bool a_used = false, o_used = false;
490 enum prefixes drep = 0;
491 enum prefixes dwait = 0;
492 uint8_t lock = prefix->lock;
493 int osize = prefix->osize;
494 int asize = prefix->asize;
495 int i, c;
496 int op1, op2;
497 struct operand *opx, *opy;
498 uint8_t opex = 0;
499 bool vex_ok = false;
500 int regmask = (segsize == 64) ? 15 : 7;
501 enum ea_type eat = EA_SCALAR;
503 for (i = 0; i < MAX_OPERANDS; i++) {
504 ins->oprs[i].segment = ins->oprs[i].disp_size =
505 (segsize == 64 ? SEG_64BIT : segsize == 32 ? SEG_32BIT : 0);
507 ins->condition = -1;
508 ins->evex_tuple = 0;
509 ins->rex = prefix->rex;
510 memset(ins->prefixes, 0, sizeof ins->prefixes);
512 if (itemp_has(t, (segsize == 64 ? IF_NOLONG : IF_LONG)))
513 return 0;
515 if (prefix->rep == 0xF2)
516 drep = (itemp_has(t, IF_BND) ? P_BND : P_REPNE);
517 else if (prefix->rep == 0xF3)
518 drep = P_REP;
520 dwait = prefix->wait ? P_WAIT : 0;
522 while ((c = *r++) != 0) {
523 op1 = (c & 3) + ((opex & 1) << 2);
524 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
525 opx = &ins->oprs[op1];
526 opy = &ins->oprs[op2];
527 opex = 0;
529 switch (c) {
530 case 01:
531 case 02:
532 case 03:
533 case 04:
534 while (c--)
535 if (*r++ != *data++)
536 return 0;
537 break;
539 case 05:
540 case 06:
541 case 07:
542 opex = c;
543 break;
545 case4(010):
547 int t = *r++, d = *data++;
548 if (d < t || d > t + 7)
549 return 0;
550 else {
551 opx->basereg = (d-t)+
552 (ins->rex & REX_B ? 8 : 0);
553 opx->segment |= SEG_RMREG;
555 break;
558 case4(014):
559             /* this is a separate index reg position of the MIB operand (ICC) */
560 /* Disassembler uses NASM's split EA form only */
561 break;
563 case4(0274):
564 opx->offset = (int8_t)*data++;
565 opx->segment |= SEG_SIGNED;
566 break;
568 case4(020):
569 opx->offset = *data++;
570 break;
572 case4(024):
573 opx->offset = *data++;
574 break;
576 case4(030):
577 opx->offset = getu16(data);
578 data += 2;
579 break;
581 case4(034):
582 if (osize == 32) {
583 opx->offset = getu32(data);
584 data += 4;
585 } else {
586 opx->offset = getu16(data);
587 data += 2;
589 if (segsize != asize)
590 opx->disp_size = asize;
591 break;
593 case4(040):
594 opx->offset = getu32(data);
595 data += 4;
596 break;
598 case4(0254):
599 opx->offset = gets32(data);
600 data += 4;
601 break;
603 case4(044):
604 switch (asize) {
605 case 16:
606 opx->offset = getu16(data);
607 data += 2;
608 if (segsize != 16)
609 opx->disp_size = 16;
610 break;
611 case 32:
612 opx->offset = getu32(data);
613 data += 4;
614 if (segsize == 16)
615 opx->disp_size = 32;
616 break;
617 case 64:
618 opx->offset = getu64(data);
619 opx->disp_size = 64;
620 data += 8;
621 break;
623 break;
625 case4(050):
626 opx->offset = gets8(data++);
627 opx->segment |= SEG_RELATIVE;
628 break;
630 case4(054):
631 opx->offset = getu64(data);
632 data += 8;
633 break;
635 case4(060):
636 opx->offset = gets16(data);
637 data += 2;
638 opx->segment |= SEG_RELATIVE;
639 opx->segment &= ~SEG_32BIT;
640 break;
642 case4(064): /* rel */
643 opx->segment |= SEG_RELATIVE;
644 /* In long mode rel is always 32 bits, sign extended. */
645 if (segsize == 64 || osize == 32) {
646 opx->offset = gets32(data);
647 data += 4;
648 if (segsize != 64)
649 opx->segment |= SEG_32BIT;
650 opx->type = (opx->type & ~SIZE_MASK)
651 | (segsize == 64 ? BITS64 : BITS32);
652 } else {
653 opx->offset = gets16(data);
654 data += 2;
655 opx->segment &= ~SEG_32BIT;
656 opx->type = (opx->type & ~SIZE_MASK) | BITS16;
658 break;
660 case4(070):
661 opx->offset = gets32(data);
662 data += 4;
663 opx->segment |= SEG_32BIT | SEG_RELATIVE;
664 break;
666 case4(0100):
667 case4(0110):
668 case4(0120):
669 case4(0130):
671 int modrm = *data++;
672 opx->segment |= SEG_RMREG;
673 data = do_ea(data, modrm, asize, segsize, eat, opy, ins);
674 if (!data)
675 return 0;
676 opx->basereg = ((modrm >> 3) & 7) + (ins->rex & REX_R ? 8 : 0);
677 if ((ins->rex & REX_EV) && (segsize == 64))
678 opx->basereg += (ins->evex_p[0] & EVEX_P0RP ? 0 : 16);
679 break;
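/*
 * Example: for "add eax,ecx" (01 c8) the ModRM byte 0xc8 has mod=3,
 * reg=1 and rm=0, so do_ea() records register 0 (EAX) as a pure
 * register r/m operand and the reg field above selects register 1
 * (ECX) for the other operand.
 */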
682 case 0172:
684 uint8_t ximm = *data++;
685 c = *r++;
686 ins->oprs[c >> 3].basereg = (ximm >> 4) & regmask;
687 ins->oprs[c >> 3].segment |= SEG_RMREG;
688 ins->oprs[c & 7].offset = ximm & 15;
690 break;
692 case 0173:
694 uint8_t ximm = *data++;
695 c = *r++;
697 if ((c ^ ximm) & 15)
698 return 0;
700 ins->oprs[c >> 4].basereg = (ximm >> 4) & regmask;
701 ins->oprs[c >> 4].segment |= SEG_RMREG;
703 break;
705 case4(0174):
707 uint8_t ximm = *data++;
709 opx->basereg = (ximm >> 4) & regmask;
710 opx->segment |= SEG_RMREG;
712 break;
714 case4(0200):
715 case4(0204):
716 case4(0210):
717 case4(0214):
718 case4(0220):
719 case4(0224):
720 case4(0230):
721 case4(0234):
723 int modrm = *data++;
724 if (((modrm >> 3) & 07) != (c & 07))
725 return 0; /* spare field doesn't match up */
726 data = do_ea(data, modrm, asize, segsize, eat, opy, ins);
727 if (!data)
728 return 0;
729 break;
732 case4(0240):
733 case 0250:
735 uint8_t evexm = *r++;
736 uint8_t evexwlp = *r++;
737 uint8_t modrm, valid_mask;
738 ins->evex_tuple = *r++ - 0300;
739 modrm = *(origdata + 1);
741 ins->rex |= REX_EV;
742 if ((prefix->rex & (REX_EV|REX_V|REX_P)) != REX_EV)
743 return 0;
745 if ((evexm & 0x1f) != prefix->vex_m)
746 return 0;
748 switch (evexwlp & 060) {
749 case 000:
750 if (prefix->rex & REX_W)
751 return 0;
752 break;
753 case 020:
754 if (!(prefix->rex & REX_W))
755 return 0;
756 ins->rex |= REX_W;
757 break;
758 case 040: /* VEX.W is a don't care */
759 ins->rex &= ~REX_W;
760 break;
761 case 060:
762 break;
765 /* If EVEX.b is set with reg-reg op,
766 * EVEX.L'L contains embedded rounding control info
768 if ((prefix->evex[2] & EVEX_P2B) && ((modrm >> 6) == 3)) {
769 valid_mask = 0x3; /* prefix only */
770 } else {
771 valid_mask = 0xf; /* vector length and prefix */
773 if ((evexwlp ^ prefix->vex_lp) & valid_mask)
774 return 0;
776 if (c == 0250) {
777 if ((prefix->vex_v != 0) ||
778 (!(prefix->evex[2] & EVEX_P2VP) &&
779 ((eat < EA_XMMVSIB) || (eat > EA_ZMMVSIB))))
780 return 0;
781 } else {
782 opx->segment |= SEG_RMREG;
783 opx->basereg = ((~prefix->evex[2] & EVEX_P2VP) << (4 - 3) ) |
784 prefix->vex_v;
786 vex_ok = true;
787 memcpy(ins->evex_p, prefix->evex, 3);
788 break;
791 case4(0260):
792 case 0270:
794 int vexm = *r++;
795 int vexwlp = *r++;
797 ins->rex |= REX_V;
798 if ((prefix->rex & (REX_V|REX_P)) != REX_V)
799 return 0;
801 if ((vexm & 0x1f) != prefix->vex_m)
802 return 0;
804 switch (vexwlp & 060) {
805 case 000:
806 if (prefix->rex & REX_W)
807 return 0;
808 break;
809 case 020:
810 if (!(prefix->rex & REX_W))
811 return 0;
812 ins->rex &= ~REX_W;
813 break;
814 case 040: /* VEX.W is a don't care */
815 ins->rex &= ~REX_W;
816 break;
817 case 060:
818 break;
821 /* The 010 bit of vexwlp is set if VEX.L is ignored */
822 if ((vexwlp ^ prefix->vex_lp) & ((vexwlp & 010) ? 03 : 07))
823 return 0;
825 if (c == 0270) {
826 if (prefix->vex_v != 0)
827 return 0;
828 } else {
829 opx->segment |= SEG_RMREG;
830 opx->basereg = prefix->vex_v;
832 vex_ok = true;
833 break;
836 case 0271:
837 if (prefix->rep == 0xF3)
838 drep = P_XRELEASE;
839 break;
841 case 0272:
842 if (prefix->rep == 0xF2)
843 drep = P_XACQUIRE;
844 else if (prefix->rep == 0xF3)
845 drep = P_XRELEASE;
846 break;
848 case 0273:
849 if (prefix->lock == 0xF0) {
850 if (prefix->rep == 0xF2)
851 drep = P_XACQUIRE;
852 else if (prefix->rep == 0xF3)
853 drep = P_XRELEASE;
855 break;
857 case 0310:
858 if (asize != 16)
859 return 0;
860 else
861 a_used = true;
862 break;
864 case 0311:
865 if (asize != 32)
866 return 0;
867 else
868 a_used = true;
869 break;
871 case 0312:
872 if (asize != segsize)
873 return 0;
874 else
875 a_used = true;
876 break;
878 case 0313:
879 if (asize != 64)
880 return 0;
881 else
882 a_used = true;
883 break;
885 case 0314:
886 if (prefix->rex & REX_B)
887 return 0;
888 break;
890 case 0315:
891 if (prefix->rex & REX_X)
892 return 0;
893 break;
895 case 0316:
896 if (prefix->rex & REX_R)
897 return 0;
898 break;
900 case 0317:
901 if (prefix->rex & REX_W)
902 return 0;
903 break;
905 case 0320:
906 if (osize != 16)
907 return 0;
908 else
909 o_used = true;
910 break;
912 case 0321:
913 if (osize != 32)
914 return 0;
915 else
916 o_used = true;
917 break;
919 case 0322:
920 if (osize != (segsize == 16 ? 16 : 32))
921 return 0;
922 else
923 o_used = true;
924 break;
926 case 0323:
927 ins->rex |= REX_W; /* 64-bit only instruction */
928 osize = 64;
929 o_used = true;
930 break;
932 case 0324:
933 if (osize != 64)
934 return 0;
935 o_used = true;
936 break;
938 case 0325:
939 ins->rex |= REX_NH;
940 break;
942 case 0330:
944 int t = *r++, d = *data++;
945 if (d < t || d > t + 15)
946 return 0;
947 else
948 ins->condition = d - t;
949 break;
952 case 0326:
953 if (prefix->rep == 0xF3)
954 return 0;
955 break;
957 case 0331:
958 if (prefix->rep)
959 return 0;
960 break;
962 case 0332:
963 if (prefix->rep != 0xF2)
964 return 0;
965 drep = 0;
966 break;
968 case 0333:
969 if (prefix->rep != 0xF3)
970 return 0;
971 drep = 0;
972 break;
974 case 0334:
975 if (lock) {
976 ins->rex |= REX_R;
977 lock = 0;
979 break;
981 case 0335:
982 if (drep == P_REP)
983 drep = P_REPE;
984 break;
986 case 0336:
987 case 0337:
988 break;
990 case 0340:
991 return 0;
993 case 0341:
994 if (prefix->wait != 0x9B)
995 return 0;
996 dwait = 0;
997 break;
999 case 0360:
1000 if (prefix->osp || prefix->rep)
1001 return 0;
1002 break;
1004 case 0361:
1005 if (!prefix->osp || prefix->rep)
1006 return 0;
1007 o_used = true;
1008 break;
1010 case 0364:
1011 if (prefix->osp)
1012 return 0;
1013 break;
1015 case 0365:
1016 if (prefix->asp)
1017 return 0;
1018 break;
1020 case 0366:
1021 if (!prefix->osp)
1022 return 0;
1023 o_used = true;
1024 break;
1026 case 0367:
1027 if (!prefix->asp)
1028 return 0;
1029 a_used = true;
1030 break;
1032 case 0370:
1033 case 0371:
1034 break;
1036 case 0374:
1037 eat = EA_XMMVSIB;
1038 break;
1040 case 0375:
1041 eat = EA_YMMVSIB;
1042 break;
1044 case 0376:
1045 eat = EA_ZMMVSIB;
1046 break;
1048 default:
1049 return 0; /* Unknown code */
1053 if (!vex_ok && (ins->rex & (REX_V | REX_EV)))
1054 return 0;
1056 /* REX cannot be combined with VEX */
1057 if ((ins->rex & REX_V) && (prefix->rex & REX_P))
1058 return 0;
1061 * Check for unused rep or a/o prefixes.
1063 for (i = 0; i < t->operands; i++) {
1064 if (ins->oprs[i].segment != SEG_RMREG)
1065 a_used = true;
1068 if (lock) {
1069 if (ins->prefixes[PPS_LOCK])
1070 return 0;
1071 ins->prefixes[PPS_LOCK] = P_LOCK;
1073 if (drep) {
1074 if (ins->prefixes[PPS_REP])
1075 return 0;
1076 ins->prefixes[PPS_REP] = drep;
1078 ins->prefixes[PPS_WAIT] = dwait;
1079 if (!o_used) {
1080 if (osize != ((segsize == 16) ? 16 : 32)) {
1081 enum prefixes pfx = 0;
1083 switch (osize) {
1084 case 16:
1085 pfx = P_O16;
1086 break;
1087 case 32:
1088 pfx = P_O32;
1089 break;
1090 case 64:
1091 pfx = P_O64;
1092 break;
1095 if (ins->prefixes[PPS_OSIZE])
1096 return 0;
1097 ins->prefixes[PPS_OSIZE] = pfx;
1100 if (!a_used && asize != segsize) {
1101 if (ins->prefixes[PPS_ASIZE])
1102 return 0;
1103 ins->prefixes[PPS_ASIZE] = asize == 16 ? P_A16 : P_A32;
1106 /* Fix: check for redundant REX prefixes */
1108 return data - origdata;
1111 /* Condition names for disassembly, sorted by x86 code */
1112 static const char * const condition_name[16] = {
1113 "o", "no", "c", "nc", "z", "nz", "na", "a",
1114 "s", "ns", "pe", "po", "l", "nl", "ng", "g"
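/*
 * ins->condition is set by the 0330 byte-code case in matches() as the
 * difference between the fetched opcode byte and the template's base
 * byte; e.g. opcode 0x74 against a base of 0x70 gives 4, and
 * condition_name[4] ("z") is appended to the mnemonic below, yielding
 * conditional forms such as jz, setz or cmovz.
 */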
1117 int32_t disasm(uint8_t *data, int32_t data_size, char *output, int outbufsize, int segsize,
1118 int64_t offset, int autosync, iflag_t *prefer)
1120 const struct itemplate * const *p, * const *best_p;
1121 const struct disasm_index *ix;
1122 uint8_t *dp;
1123 int length, best_length = 0;
1124 char *segover;
1125 int i, slen, colon, n;
1126 uint8_t *origdata;
1127 int works;
1128 insn tmp_ins, ins;
1129 iflag_t goodness, best;
1130 int best_pref;
1131 struct prefix_info prefix;
1132 bool end_prefix;
1133 bool is_evex;
1135 memset(&ins, 0, sizeof ins);
1138 * Scan for prefixes.
1140 memset(&prefix, 0, sizeof prefix);
1141 prefix.asize = segsize;
1142 prefix.osize = (segsize == 64) ? 32 : segsize;
1143 segover = NULL;
1144 origdata = data;
1146 ix = itable;
1148 end_prefix = false;
1149 while (!end_prefix) {
1150 switch (*data) {
1151 case 0xF2:
1152 case 0xF3:
1153 fetch_or_return(origdata, data, data_size, 1);
1154 prefix.rep = *data++;
1155 break;
1157 case 0x9B:
1158 fetch_or_return(origdata, data, data_size, 1);
1159 prefix.wait = *data++;
1160 break;
1162 case 0xF0:
1163 fetch_or_return(origdata, data, data_size, 1);
1164 prefix.lock = *data++;
1165 break;
1167 case 0x2E:
1168 fetch_or_return(origdata, data, data_size, 1);
1169 segover = "cs", prefix.seg = *data++;
1170 break;
1171 case 0x36:
1172 fetch_or_return(origdata, data, data_size, 1);
1173 segover = "ss", prefix.seg = *data++;
1174 break;
1175 case 0x3E:
1176 fetch_or_return(origdata, data, data_size, 1);
1177 segover = "ds", prefix.seg = *data++;
1178 break;
1179 case 0x26:
1180 fetch_or_return(origdata, data, data_size, 1);
1181 segover = "es", prefix.seg = *data++;
1182 break;
1183 case 0x64:
1184 fetch_or_return(origdata, data, data_size, 1);
1185 segover = "fs", prefix.seg = *data++;
1186 break;
1187 case 0x65:
1188 fetch_or_return(origdata, data, data_size, 1);
1189 segover = "gs", prefix.seg = *data++;
1190 break;
1192 case 0x66:
1193 fetch_or_return(origdata, data, data_size, 1);
1194 prefix.osize = (segsize == 16) ? 32 : 16;
1195 prefix.osp = *data++;
1196 break;
1197 case 0x67:
1198 fetch_or_return(origdata, data, data_size, 1);
1199 prefix.asize = (segsize == 32) ? 16 : 32;
1200 prefix.asp = *data++;
1201 break;
1203 case 0xC4:
1204 case 0xC5:
1205 if (segsize == 64 || (data[1] & 0xc0) == 0xc0) {
1206 fetch_or_return(origdata, data, data_size, 2);
1207 prefix.vex[0] = *data++;
1208 prefix.vex[1] = *data++;
1210 prefix.rex = REX_V;
1211 prefix.vex_c = RV_VEX;
1213 if (prefix.vex[0] == 0xc4) {
1214 fetch_or_return(origdata, data, data_size, 1);
1215 prefix.vex[2] = *data++;
1216 prefix.rex |= (~prefix.vex[1] >> 5) & 7; /* REX_RXB */
1217 prefix.rex |= (prefix.vex[2] >> (7-3)) & REX_W;
1218 prefix.vex_m = prefix.vex[1] & 0x1f;
1219 prefix.vex_v = (~prefix.vex[2] >> 3) & 15;
1220 prefix.vex_lp = prefix.vex[2] & 7;
1221 } else {
1222 prefix.rex |= (~prefix.vex[1] >> (7-2)) & REX_R;
1223 prefix.vex_m = 1;
1224 prefix.vex_v = (~prefix.vex[1] >> 3) & 15;
1225 prefix.vex_lp = prefix.vex[1] & 7;
1228 ix = itable_vex[RV_VEX][prefix.vex_m][prefix.vex_lp & 3];
1230 end_prefix = true;
1231 break;
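/*
 * Worked example: the two-byte prefix c5 f8 (as in vzeroupper,
 * c5 f8 77) decodes here to vex_m = 1 (the 0F opcode map), vex_v = 0
 * (VEX.vvvv = 1111, no extra source register) and vex_lp = 0
 * (VEX.L = 0, no SIMD prefix), so the template lookup continues in
 * itable_vex[RV_VEX][1][0].
 */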
1233 case 0x62:
1235 if (segsize == 64 || ((data[1] & 0xc0) == 0xc0)) {
1236 fetch_or_return(origdata, data, data_size, 4);
1237 data++; /* 62h EVEX prefix */
1238 prefix.evex[0] = *data++;
1239 prefix.evex[1] = *data++;
1240 prefix.evex[2] = *data++;
1242 prefix.rex = REX_EV;
1243 prefix.vex_c = RV_EVEX;
1244 prefix.rex |= (~prefix.evex[0] >> 5) & 7; /* REX_RXB */
1245 prefix.rex |= (prefix.evex[1] >> (7-3)) & REX_W;
1246 prefix.vex_m = prefix.evex[0] & EVEX_P0MM;
1247 prefix.vex_v = (~prefix.evex[1] & EVEX_P1VVVV) >> 3;
1248 prefix.vex_lp = ((prefix.evex[2] & EVEX_P2LL) >> (5-2)) |
1249 (prefix.evex[1] & EVEX_P1PP);
1251 ix = itable_vex[prefix.vex_c][prefix.vex_m][prefix.vex_lp & 3];
1253 end_prefix = true;
1254 break;
1257 case 0x8F:
1258 if ((data[1] & 030) != 0 &&
1259 (segsize == 64 || (data[1] & 0xc0) == 0xc0)) {
1260 fetch_or_return(origdata, data, data_size, 3);
1261 prefix.vex[0] = *data++;
1262 prefix.vex[1] = *data++;
1263 prefix.vex[2] = *data++;
1265 prefix.rex = REX_V;
1266 prefix.vex_c = RV_XOP;
1268 prefix.rex |= (~prefix.vex[1] >> 5) & 7; /* REX_RXB */
1269 prefix.rex |= (prefix.vex[2] >> (7-3)) & REX_W;
1270 prefix.vex_m = prefix.vex[1] & 0x1f;
1271 prefix.vex_v = (~prefix.vex[2] >> 3) & 15;
1272 prefix.vex_lp = prefix.vex[2] & 7;
1274 ix = itable_vex[RV_XOP][prefix.vex_m][prefix.vex_lp & 3];
1276 end_prefix = true;
1277 break;
1279 case REX_P + 0x0:
1280 case REX_P + 0x1:
1281 case REX_P + 0x2:
1282 case REX_P + 0x3:
1283 case REX_P + 0x4:
1284 case REX_P + 0x5:
1285 case REX_P + 0x6:
1286 case REX_P + 0x7:
1287 case REX_P + 0x8:
1288 case REX_P + 0x9:
1289 case REX_P + 0xA:
1290 case REX_P + 0xB:
1291 case REX_P + 0xC:
1292 case REX_P + 0xD:
1293 case REX_P + 0xE:
1294 case REX_P + 0xF:
1295 if (segsize == 64) {
1296 fetch_or_return(origdata, data, data_size, 1);
1297 prefix.rex = *data++;
1298 if (prefix.rex & REX_W)
1299 prefix.osize = 64;
1301 end_prefix = true;
1302 break;
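/*
 * Example: in 64-bit mode the byte 0x48 is REX.W, so prefix.osize is
 * forced to 64 here and the 0324 byte-code in matches() will accept
 * templates that require a 64-bit operand size.
 */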
1304 default:
1305 end_prefix = true;
1306 break;
1310 iflag_set_all(&best); /* Worst possible */
1311 best_p = NULL;
1312 best_pref = INT_MAX;
1314 if (!ix)
1315 return 0; /* No instruction table at all... */
1317 dp = data;
1318 fetch_or_return(origdata, dp, data_size, 1);
1319 ix += *dp++;
1320 while (ix->n == -1) {
1321 fetch_or_return(origdata, dp, data_size, 1);
1322 ix = (const struct disasm_index *)ix->p + *dp++;
1325 p = (const struct itemplate * const *)ix->p;
1326 for (n = ix->n; n; n--, p++) {
1327 if ((length = matches(*p, data, &prefix, segsize, &tmp_ins))) {
1328 works = true;
1330 * Final check to make sure the types of r/m match up.
1331 * XXX: Need to make sure this is actually correct.
1333 for (i = 0; i < (*p)->operands; i++) {
1334 if (
1335 /* If it's a mem-only EA but we have a
1336 register, die. */
1337 ((tmp_ins.oprs[i].segment & SEG_RMREG) &&
1338 is_class(MEMORY, (*p)->opd[i])) ||
1339 /* If it's a reg-only EA but we have a memory
1340 ref, die. */
1341 (!(tmp_ins.oprs[i].segment & SEG_RMREG) &&
1342 !(REG_EA & ~(*p)->opd[i]) &&
1343 !((*p)->opd[i] & REG_SMASK)) ||
1344 /* Register type mismatch (eg FS vs REG_DESS):
1345 die. */
1346 ((((*p)->opd[i] & (REGISTER | FPUREG)) ||
1347 (tmp_ins.oprs[i].segment & SEG_RMREG)) &&
1348 !whichreg((*p)->opd[i],
1349 tmp_ins.oprs[i].basereg, tmp_ins.rex))
1351 works = false;
1352 break;
1357 * Note: we always prefer instructions which incorporate
1358 * prefixes in the instructions themselves. This is to allow
1359 * e.g. PAUSE to be preferred to REP NOP, and deal with
1360 * MMX/SSE instructions where prefixes are used to select
1361 * between MMX and SSE register sets or outright opcode
1362 * selection.
1364 if (works) {
1365 int i, nprefix;
1366 goodness = iflag_pfmask(*p);
1367 goodness = iflag_xor(&goodness, prefer);
1368 nprefix = 0;
1369 for (i = 0; i < MAXPREFIX; i++)
1370 if (tmp_ins.prefixes[i])
1371 nprefix++;
1372 if (nprefix < best_pref ||
1373 (nprefix == best_pref &&
1374 iflag_cmp(&goodness, &best) < 0)) {
1375 /* This is the best one found so far */
1376 best = goodness;
1377 best_p = p;
1378 best_pref = nprefix;
1379 best_length = length;
1380 ins = tmp_ins;
1386 if (!best_p)
1387 return 0; /* no instruction was matched */
1389 /* Pick the best match */
1390 p = best_p;
1391 length = best_length;
1393 slen = 0;
1395 /* TODO: snprintf returns the value that the string would have if
1396 * the buffer were long enough, and not the actual length of
1397 * the returned string, so each instance of using the return
1398 * value of snprintf should actually be checked to assure that
1399 * the return value is "sane." Maybe a macro wrapper could
1400 * be used for that purpose.
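/*
 * One possible shape for such a wrapper (a sketch only, not used by the
 * code below; the name and the clamping policy are illustrative and
 * would need <stdarg.h>):
 *
 *     static int snprintf_clamped(char *buf, size_t size, const char *fmt, ...)
 *     {
 *         va_list ap;
 *         int n;
 *         if (size == 0)
 *             return 0;
 *         va_start(ap, fmt);
 *         n = vsnprintf(buf, size, fmt, ap);
 *         va_end(ap);
 *         if (n < 0)
 *             return 0;
 *         return n >= (int)size ? (int)size - 1 : n;
 *     }
 *
 * so that "slen += ..." can never advance past the end of the output
 * buffer even when the formatted text is truncated.
 */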
1402 for (i = 0; i < MAXPREFIX; i++) {
1403 const char *prefix = prefix_name(ins.prefixes[i]);
1404 if (prefix)
1405 slen += snprintf(output+slen, outbufsize-slen, "%s ", prefix);
1408 i = (*p)->opcode;
1409 if (i >= FIRST_COND_OPCODE)
1410 slen += snprintf(output + slen, outbufsize - slen, "%s%s",
1411 nasm_insn_names[i], condition_name[ins.condition]);
1412 else
1413 slen += snprintf(output + slen, outbufsize - slen, "%s",
1414 nasm_insn_names[i]);
1416 colon = false;
1417 is_evex = !!(ins.rex & REX_EV);
1418 length += data - origdata; /* fix up for prefixes */
1419 for (i = 0; i < (*p)->operands; i++) {
1420 opflags_t t = (*p)->opd[i];
1421 decoflags_t deco = (*p)->deco[i];
1422 const operand *o = &ins.oprs[i];
1423 int64_t offs;
1425 output[slen++] = (colon ? ':' : i == 0 ? ' ' : ',');
1427 offs = o->offset;
1428 if (o->segment & SEG_RELATIVE) {
1429 offs += offset + length;
1431 * sort out wraparound
1433 if (!(o->segment & (SEG_32BIT|SEG_64BIT)))
1434 offs &= 0xffff;
1435 else if (segsize != 64)
1436 offs &= 0xffffffff;
1439 * add sync marker, if autosync is on
1441 if (autosync)
1442 add_sync(offs, 0L);
1445 if (t & COLON)
1446 colon = true;
1447 else
1448 colon = false;
1450 if ((t & (REGISTER | FPUREG)) ||
1451 (o->segment & SEG_RMREG)) {
1452 enum reg_enum reg;
1453 reg = whichreg(t, o->basereg, ins.rex);
1454 if (t & TO)
1455 slen += snprintf(output + slen, outbufsize - slen, "to ");
1456 slen += snprintf(output + slen, outbufsize - slen, "%s",
1457 nasm_reg_names[reg-EXPR_REG_START]);
1458 if (t & REGSET_MASK)
1459 slen += snprintf(output + slen, outbufsize - slen, "+%d",
1460 (int)((t & REGSET_MASK) >> (REGSET_SHIFT-1))-1);
1461 if (is_evex && deco)
1462 slen += append_evex_reg_deco(output + slen, outbufsize - slen,
1463 deco, ins.evex_p);
1464 } else if (!(UNITY & ~t)) {
1465 output[slen++] = '1';
1466 } else if (t & IMMEDIATE) {
1467 if (t & BITS8) {
1468 slen +=
1469 snprintf(output + slen, outbufsize - slen, "byte ");
1470 if (o->segment & SEG_SIGNED) {
1471 if (offs < 0) {
1472 offs *= -1;
1473 output[slen++] = '-';
1474 } else
1475 output[slen++] = '+';
1477 } else if (t & BITS16) {
1478 slen +=
1479 snprintf(output + slen, outbufsize - slen, "word ");
1480 } else if (t & BITS32) {
1481 slen +=
1482 snprintf(output + slen, outbufsize - slen, "dword ");
1483 } else if (t & BITS64) {
1484 slen +=
1485 snprintf(output + slen, outbufsize - slen, "qword ");
1486 } else if (t & NEAR) {
1487 slen +=
1488 snprintf(output + slen, outbufsize - slen, "near ");
1489 } else if (t & SHORT) {
1490 slen +=
1491 snprintf(output + slen, outbufsize - slen, "short ");
1493 slen +=
1494 snprintf(output + slen, outbufsize - slen, "0x%"PRIx64"",
1495 offs);
1496 } else if (!(MEM_OFFS & ~t)) {
1497 slen +=
1498 snprintf(output + slen, outbufsize - slen,
1499 "[%s%s%s0x%"PRIx64"]",
1500 (segover ? segover : ""),
1501 (segover ? ":" : ""),
1502 (o->disp_size == 64 ? "qword " :
1503 o->disp_size == 32 ? "dword " :
1504 o->disp_size == 16 ? "word " : ""), offs);
1505 segover = NULL;
1506 } else if (is_class(REGMEM, t)) {
1507 int started = false;
1508 if (t & BITS8)
1509 slen +=
1510 snprintf(output + slen, outbufsize - slen, "byte ");
1511 if (t & BITS16)
1512 slen +=
1513 snprintf(output + slen, outbufsize - slen, "word ");
1514 if (t & BITS32)
1515 slen +=
1516 snprintf(output + slen, outbufsize - slen, "dword ");
1517 if (t & BITS64)
1518 slen +=
1519 snprintf(output + slen, outbufsize - slen, "qword ");
1520 if (t & BITS80)
1521 slen +=
1522 snprintf(output + slen, outbufsize - slen, "tword ");
1523 if ((ins.evex_p[2] & EVEX_P2B) && (deco & BRDCAST_MASK)) {
1524 /* when broadcasting, each element size should be used */
1525 if (deco & BR_BITS32)
1526 slen +=
1527 snprintf(output + slen, outbufsize - slen, "dword ");
1528 else if (deco & BR_BITS64)
1529 slen +=
1530 snprintf(output + slen, outbufsize - slen, "qword ");
1531 } else {
1532 if (t & BITS128)
1533 slen +=
1534 snprintf(output + slen, outbufsize - slen, "oword ");
1535 if (t & BITS256)
1536 slen +=
1537 snprintf(output + slen, outbufsize - slen, "yword ");
1538 if (t & BITS512)
1539 slen +=
1540 snprintf(output + slen, outbufsize - slen, "zword ");
1542 if (t & FAR)
1543 slen += snprintf(output + slen, outbufsize - slen, "far ");
1544 if (t & NEAR)
1545 slen +=
1546 snprintf(output + slen, outbufsize - slen, "near ");
1547 output[slen++] = '[';
1548 if (o->disp_size)
1549 slen += snprintf(output + slen, outbufsize - slen, "%s",
1550 (o->disp_size == 64 ? "qword " :
1551 o->disp_size == 32 ? "dword " :
1552 o->disp_size == 16 ? "word " :
1553 ""));
1554 if (o->eaflags & EAF_REL)
1555 slen += snprintf(output + slen, outbufsize - slen, "rel ");
1556 if (segover) {
1557 slen +=
1558 snprintf(output + slen, outbufsize - slen, "%s:",
1559 segover);
1560 segover = NULL;
1562 if (o->basereg != -1) {
1563 slen += snprintf(output + slen, outbufsize - slen, "%s",
1564 nasm_reg_names[(o->basereg-EXPR_REG_START)]);
1565 started = true;
1567 if (o->indexreg != -1 && !itemp_has(*best_p, IF_MIB)) {
1568 if (started)
1569 output[slen++] = '+';
1570 slen += snprintf(output + slen, outbufsize - slen, "%s",
1571 nasm_reg_names[(o->indexreg-EXPR_REG_START)]);
1572 if (o->scale > 1)
1573 slen +=
1574 snprintf(output + slen, outbufsize - slen, "*%d",
1575 o->scale);
1576 started = true;
1580 if (o->segment & SEG_DISP8) {
1581 if (is_evex) {
1582 const char *prefix;
1583 uint32_t offset = offs;
1584 if ((int32_t)offset < 0) {
1585 prefix = "-";
1586 offset = -offset;
1587 } else {
1588 prefix = "+";
1590 slen +=
1591 snprintf(output + slen, outbufsize - slen, "%s0x%"PRIx32"",
1592 prefix, offset);
1593 } else {
1594 const char *prefix;
1595 uint8_t offset = offs;
1596 if ((int8_t)offset < 0) {
1597 prefix = "-";
1598 offset = -offset;
1599 } else {
1600 prefix = "+";
1602 slen +=
1603 snprintf(output + slen, outbufsize - slen, "%s0x%"PRIx8"",
1604 prefix, offset);
1606 } else if (o->segment & SEG_DISP16) {
1607 const char *prefix;
1608 uint16_t offset = offs;
1609 if ((int16_t)offset < 0 && started) {
1610 offset = -offset;
1611 prefix = "-";
1612 } else {
1613 prefix = started ? "+" : "";
1615 slen +=
1616 snprintf(output + slen, outbufsize - slen,
1617 "%s0x%"PRIx16"", prefix, offset);
1618 } else if (o->segment & SEG_DISP32) {
1619 if (prefix.asize == 64) {
1620 const char *prefix;
1621 uint64_t offset = offs;
1622 if ((int32_t)offs < 0 && started) {
1623 offset = -offset;
1624 prefix = "-";
1625 } else {
1626 prefix = started ? "+" : "";
1628 slen +=
1629 snprintf(output + slen, outbufsize - slen,
1630 "%s0x%"PRIx64"", prefix, offset);
1631 } else {
1632 const char *prefix;
1633 uint32_t offset = offs;
1634 if ((int32_t) offset < 0 && started) {
1635 offset = -offset;
1636 prefix = "-";
1637 } else {
1638 prefix = started ? "+" : "";
1640 slen +=
1641 snprintf(output + slen, outbufsize - slen,
1642 "%s0x%"PRIx32"", prefix, offset);
1646 if (o->indexreg != -1 && itemp_has(*best_p, IF_MIB)) {
1647 output[slen++] = ',';
1648 slen += snprintf(output + slen, outbufsize - slen, "%s",
1649 nasm_reg_names[(o->indexreg-EXPR_REG_START)]);
1650 if (o->scale > 1)
1651 slen +=
1652 snprintf(output + slen, outbufsize - slen, "*%d",
1653 o->scale);
1654 started = true;
1657 output[slen++] = ']';
1659 if (is_evex && deco)
1660 slen += append_evex_mem_deco(output + slen, outbufsize - slen,
1661 t, deco, ins.evex_p);
1662 } else {
1663 slen +=
1664 snprintf(output + slen, outbufsize - slen, "<operand%d>",
1668 output[slen] = '\0';
1669 if (segover) { /* unused segment override */
1670 char *p = output;
1671 int count = slen + 1;
1672 while (count--)
1673 p[count + 3] = p[count];
1674 strncpy(output, segover, 2);
1675 output[2] = ' ';
1677 return length;
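/*
 * Callers are expected to drive disasm() and eatbyte() together, along
 * these lines (a sketch; buffer management and sync handling omitted,
 * variable names illustrative):
 *
 *     len = disasm(p, remaining, outbuf, sizeof outbuf, bits, offset,
 *                  autosync, &prefer);
 *     if (!len)
 *         len = eatbyte(p, outbuf, sizeof outbuf, bits);
 *     offset += len;
 *     p += len;
 *
 * which is roughly how ndisasm consumes its input.
 */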
1681 * This is called when we don't have a complete instruction. If it
1682  * is a standalone *single-byte* prefix, show it as such; otherwise
1683 * print it as a literal.
1685 int32_t eatbyte(uint8_t *data, char *output, int outbufsize, int segsize)
1687 uint8_t byte = *data;
1688 const char *str = NULL;
1690 switch (byte) {
1691 case 0xF2:
1692 str = "repne";
1693 break;
1694 case 0xF3:
1695 str = "rep";
1696 break;
1697 case 0x9B:
1698 str = "wait";
1699 break;
1700 case 0xF0:
1701 str = "lock";
1702 break;
1703 case 0x2E:
1704 str = "cs";
1705 break;
1706 case 0x36:
1707 str = "ss";
1708 break;
1709 case 0x3E:
1710 str = "ds";
1711 break;
1712 case 0x26:
1713 str = "es";
1714 break;
1715 case 0x64:
1716 str = "fs";
1717 break;
1718 case 0x65:
1719 str = "gs";
1720 break;
1721 case 0x66:
1722 str = (segsize == 16) ? "o32" : "o16";
1723 break;
1724 case 0x67:
1725 str = (segsize == 32) ? "a16" : "a32";
1726 break;
1727 case REX_P + 0x0:
1728 case REX_P + 0x1:
1729 case REX_P + 0x2:
1730 case REX_P + 0x3:
1731 case REX_P + 0x4:
1732 case REX_P + 0x5:
1733 case REX_P + 0x6:
1734 case REX_P + 0x7:
1735 case REX_P + 0x8:
1736 case REX_P + 0x9:
1737 case REX_P + 0xA:
1738 case REX_P + 0xB:
1739 case REX_P + 0xC:
1740 case REX_P + 0xD:
1741 case REX_P + 0xE:
1742 case REX_P + 0xF:
1743 if (segsize == 64) {
1744 snprintf(output, outbufsize, "rex%s%s%s%s%s",
1745 (byte == REX_P) ? "" : ".",
1746 (byte & REX_W) ? "w" : "",
1747 (byte & REX_R) ? "r" : "",
1748 (byte & REX_X) ? "x" : "",
1749 (byte & REX_B) ? "b" : "");
1750 break;
1752 /* else fall through */
1753 default:
1754 snprintf(output, outbufsize, "db 0x%02x", byte);
1755 break;
1758 if (str)
1759 snprintf(output, outbufsize, "%s", str);
1761 return 1;