1 /* ----------------------------------------------------------------------- *
2 *
3 * Copyright 1996-2012 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
34 /*
35 * disasm.c where all the _work_ gets done in the Netwide Disassembler
38 #include "compiler.h"
40 #include <stdio.h>
41 #include <string.h>
42 #include <limits.h>
44 #include "nasm.h"
45 #include "disasm.h"
46 #include "sync.h"
47 #include "insns.h"
48 #include "tables.h"
49 #include "regdis.h"
50 #include "disp8.h"
53 * Flags that go into the `segment' field of `insn' structures
54 * during disassembly.
56 #define SEG_RELATIVE 1
57 #define SEG_32BIT 2
58 #define SEG_RMREG 4
59 #define SEG_DISP8 8
60 #define SEG_DISP16 16
61 #define SEG_DISP32 32
62 #define SEG_NODISP 64
63 #define SEG_SIGNED 128
64 #define SEG_64BIT 256
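/*
 * These flags are OR'd into an operand's `segment' field as it is decoded:
 * SEG_RELATIVE marks an offset relative to the end of the instruction,
 * SEG_RMREG a plain register taken from the ModRM byte, SEG_DISP8/16/32
 * the displacement size actually present, SEG_NODISP the absence of any
 * displacement, SEG_SIGNED a sign-extended immediate, and
 * SEG_32BIT/SEG_64BIT the offset width used when sorting out wraparound.
 */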
67 * Prefix information
69 struct prefix_info {
70 uint8_t osize; /* Operand size */
71 uint8_t asize; /* Address size */
72 uint8_t osp; /* Operand size prefix present */
73 uint8_t asp; /* Address size prefix present */
74 uint8_t rep; /* Rep prefix present */
75 uint8_t seg; /* Segment override prefix present */
76 uint8_t wait; /* WAIT "prefix" present */
77 uint8_t lock; /* Lock prefix present */
78 uint8_t vex[3]; /* VEX prefix present */
79 uint8_t vex_c; /* VEX "class" (VEX, XOP, ...) */
80 uint8_t vex_m; /* VEX.M field */
81 uint8_t vex_v;
82 uint8_t vex_lp; /* VEX.LP fields */
83 uint32_t rex; /* REX prefix present */
84 uint8_t evex[3]; /* EVEX prefix present */
87 #define getu8(x) (*(uint8_t *)(x))
88 #if X86_MEMORY
 89 /* Little-endian CPU which can handle unaligned references */
90 #define getu16(x) (*(uint16_t *)(x))
91 #define getu32(x) (*(uint32_t *)(x))
92 #define getu64(x) (*(uint64_t *)(x))
93 #else
94 static uint16_t getu16(uint8_t *data)
96 return (uint16_t)data[0] + ((uint16_t)data[1] << 8);
98 static uint32_t getu32(uint8_t *data)
100 return (uint32_t)getu16(data) + ((uint32_t)getu16(data+2) << 16);
102 static uint64_t getu64(uint8_t *data)
104 return (uint64_t)getu32(data) + ((uint64_t)getu32(data+4) << 32);
106 #endif
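/*
 * Either way the result is a little-endian load: e.g. the byte pair
 * 0x34 0x12 reads back as 0x1234 through getu16() regardless of the
 * host's byte order or alignment constraints.
 */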
108 #define gets8(x) ((int8_t)getu8(x))
109 #define gets16(x) ((int16_t)getu16(x))
110 #define gets32(x) ((int32_t)getu32(x))
111 #define gets64(x) ((int64_t)getu64(x))
113 /* Important: regval must already have been adjusted for rex extensions */
114 static enum reg_enum whichreg(opflags_t regflags, int regval, int rex)
116 size_t i;
118 static const struct {
119 opflags_t flags;
120 enum reg_enum reg;
121 } specific_registers[] = {
122 {REG_AL, R_AL},
123 {REG_AX, R_AX},
124 {REG_EAX, R_EAX},
125 {REG_RAX, R_RAX},
126 {REG_DL, R_DL},
127 {REG_DX, R_DX},
128 {REG_EDX, R_EDX},
129 {REG_RDX, R_RDX},
130 {REG_CL, R_CL},
131 {REG_CX, R_CX},
132 {REG_ECX, R_ECX},
133 {REG_RCX, R_RCX},
134 {FPU0, R_ST0},
135 {XMM0, R_XMM0},
136 {YMM0, R_YMM0},
137 {ZMM0, R_ZMM0},
138 {REG_ES, R_ES},
139 {REG_CS, R_CS},
140 {REG_SS, R_SS},
141 {REG_DS, R_DS},
142 {REG_FS, R_FS},
143 {REG_GS, R_GS},
144 {OPMASK0, R_K0},
147 if (!(regflags & (REGISTER|REGMEM)))
148 return 0; /* Registers not permissible?! */
150 regflags |= REGISTER;
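    /*
     * Each test below has the form !(CLASS & ~regflags), i.e. it succeeds
     * only when regflags supplies every bit CLASS requires; forcing the
     * REGISTER bit lets r/m-style operand classes pass those tests once
     * we know a register is permissible at all.
     */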
152 for (i = 0; i < ARRAY_SIZE(specific_registers); i++)
153 if (!(specific_registers[i].flags & ~regflags))
154 return specific_registers[i].reg;
156 /* All the entries below look up regval in a 16-entry array (32 with EVEX) */
157 if (regval < 0 || regval > (rex & REX_EV ? 31 : 15))
158 return 0;
160 #define GET_REGISTER(__array, __index) \
161 ((size_t)(__index) < (size_t)ARRAY_SIZE(__array) ? __array[(__index)] : 0)
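/*
 * GET_REGISTER bounds-checks the index and yields 0 (no register) when it
 * is out of range, so a malformed encoding simply fails to match rather
 * than reading past the end of a lookup table.
 */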
163 if (!(REG8 & ~regflags)) {
164 if (rex & (REX_P|REX_NH))
165 return GET_REGISTER(nasm_rd_reg8_rex, regval);
166 else
167 return GET_REGISTER(nasm_rd_reg8, regval);
169 if (!(REG16 & ~regflags))
170 return GET_REGISTER(nasm_rd_reg16, regval);
171 if (!(REG32 & ~regflags))
172 return GET_REGISTER(nasm_rd_reg32, regval);
173 if (!(REG64 & ~regflags))
174 return GET_REGISTER(nasm_rd_reg64, regval);
175 if (!(REG_SREG & ~regflags))
176 return GET_REGISTER(nasm_rd_sreg, regval & 7); /* Ignore REX */
177 if (!(REG_CREG & ~regflags))
178 return GET_REGISTER(nasm_rd_creg, regval);
179 if (!(REG_DREG & ~regflags))
180 return GET_REGISTER(nasm_rd_dreg, regval);
181 if (!(REG_TREG & ~regflags)) {
182 if (regval > 7)
183 return 0; /* TR registers are ill-defined with rex */
184 return GET_REGISTER(nasm_rd_treg, regval);
186 if (!(FPUREG & ~regflags))
187 return GET_REGISTER(nasm_rd_fpureg, regval & 7); /* Ignore REX */
188 if (!(MMXREG & ~regflags))
189 return GET_REGISTER(nasm_rd_mmxreg, regval & 7); /* Ignore REX */
190 if (!(XMMREG & ~regflags))
191 return GET_REGISTER(nasm_rd_xmmreg, regval);
192 if (!(YMMREG & ~regflags))
193 return GET_REGISTER(nasm_rd_ymmreg, regval);
194 if (!(ZMMREG & ~regflags))
195 return GET_REGISTER(nasm_rd_zmmreg, regval);
196 if (!(OPMASKREG & ~regflags))
197 return GET_REGISTER(nasm_rd_opmaskreg, regval);
198 if (!(BNDREG & ~regflags))
199 return GET_REGISTER(nasm_rd_bndreg, regval);
201 #undef GET_REGISTER
202 return 0;
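/*
 * Append the EVEX decorations of a register operand -- opmask ({k1}..{k7}),
 * zeroing ({z}) and embedded rounding or SAE ({rn-sae}, {sae}, ...) -- to
 * buf, returning the number of characters written.
 */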
205 static uint32_t append_evex_reg_deco(char *buf, uint32_t num,
206 decoflags_t deco, uint8_t *evex)
208 const char * const er_names[] = {"rn-sae", "rd-sae", "ru-sae", "rz-sae"};
209 uint32_t num_chars = 0;
211 if ((deco & MASK) && (evex[2] & EVEX_P2AAA)) {
212 enum reg_enum opmasknum = nasm_rd_opmaskreg[evex[2] & EVEX_P2AAA];
213 const char * regname = nasm_reg_names[opmasknum - EXPR_REG_START];
215 num_chars += snprintf(buf + num_chars, num - num_chars,
216 "{%s}", regname);
218 if ((deco & Z) && (evex[2] & EVEX_P2Z)) {
219 num_chars += snprintf(buf + num_chars, num - num_chars,
220 "{z}");
224 if (evex[2] & EVEX_P2B) {
225 if (deco & ER) {
226 uint8_t er_type = (evex[2] & EVEX_P2LL) >> 5;
227 num_chars += snprintf(buf + num_chars, num - num_chars,
228 ",{%s}", er_names[er_type]);
229 } else if (deco & SAE) {
230 num_chars += snprintf(buf + num_chars, num - num_chars,
231 ",{sae}");
235 return num_chars;
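/*
 * Likewise for a memory operand: emit the broadcast decoration
 * ({1to8}, {1to16}, ...) when EVEX.b is set, followed by any opmask and
 * zeroing decorations.
 */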
238 static uint32_t append_evex_mem_deco(char *buf, uint32_t num, opflags_t type,
239 decoflags_t deco, uint8_t *evex)
241 uint32_t num_chars = 0;
243 if ((evex[2] & EVEX_P2B) && (deco & BRDCAST_MASK)) {
244 decoflags_t deco_brsize = deco & BRSIZE_MASK;
245 opflags_t template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
246 uint8_t br_num = (type & SIZE_MASK) / BITS128 *
247 BITS64 / template_opsize * 2;
249 num_chars += snprintf(buf + num_chars, num - num_chars,
250 "{1to%d}", br_num);
253 if ((deco & MASK) && (evex[2] & EVEX_P2AAA)) {
254 enum reg_enum opmasknum = nasm_rd_opmaskreg[evex[2] & EVEX_P2AAA];
255 const char * regname = nasm_reg_names[opmasknum - EXPR_REG_START];
257 num_chars += snprintf(buf + num_chars, num - num_chars,
258 "{%s}", regname);
260 if ((deco & Z) && (evex[2] & EVEX_P2Z)) {
261 num_chars += snprintf(buf + num_chars, num - num_chars,
262 "{z}");
267 return num_chars;
271 * Process an effective address (ModRM) specification.
273 static uint8_t *do_ea(uint8_t *data, int modrm, int asize,
274 int segsize, enum ea_type type,
275 operand *op, insn *ins)
277 int mod, rm, scale, index, base;
278 int rex;
279 uint8_t *evex;
280 uint8_t sib = 0;
281 bool is_evex = !!(ins->rex & REX_EV);
283 mod = (modrm >> 6) & 03;
284 rm = modrm & 07;
286 if (mod != 3 && asize != 16 && rm == 4)
287 sib = *data++;
289 rex = ins->rex;
290 evex = ins->evex_p;
292 if (mod == 3) { /* pure register version */
293 op->basereg = rm+(rex & REX_B ? 8 : 0);
294 op->segment |= SEG_RMREG;
295 if (is_evex && segsize == 64) {
296 op->basereg += (evex[0] & EVEX_P0X ? 0 : 16);
298 return data;
301 op->disp_size = 0;
302 op->eaflags = 0;
304 if (asize == 16) {
306 * <mod> specifies the displacement size (none, byte or
307 * word), and <rm> specifies the register combination.
308 * Exception: mod=0,rm=6 does not specify [BP] as one might
309 * expect, but instead specifies [disp16].
312 if (type != EA_SCALAR)
313 return NULL;
315 op->indexreg = op->basereg = -1;
316 op->scale = 1; /* always, in 16 bits */
317 switch (rm) {
318 case 0:
319 op->basereg = R_BX;
320 op->indexreg = R_SI;
321 break;
322 case 1:
323 op->basereg = R_BX;
324 op->indexreg = R_DI;
325 break;
326 case 2:
327 op->basereg = R_BP;
328 op->indexreg = R_SI;
329 break;
330 case 3:
331 op->basereg = R_BP;
332 op->indexreg = R_DI;
333 break;
334 case 4:
335 op->basereg = R_SI;
336 break;
337 case 5:
338 op->basereg = R_DI;
339 break;
340 case 6:
341 op->basereg = R_BP;
342 break;
343 case 7:
344 op->basereg = R_BX;
345 break;
347 if (rm == 6 && mod == 0) { /* special case */
348 op->basereg = -1;
349 if (segsize != 16)
350 op->disp_size = 16;
351 mod = 2; /* fake disp16 */
353 switch (mod) {
354 case 0:
355 op->segment |= SEG_NODISP;
356 break;
357 case 1:
358 op->segment |= SEG_DISP8;
359 if (ins->evex_tuple != 0) {
360 op->offset = gets8(data) * get_disp8N(ins);
361 } else {
362 op->offset = gets8(data);
364 data++;
365 break;
366 case 2:
367 op->segment |= SEG_DISP16;
368 op->offset = *data++;
369 op->offset |= ((unsigned)*data++) << 8;
370 break;
372 return data;
373 } else {
375 * Once again, <mod> specifies displacement size (this time
376 * none, byte or *dword*), while <rm> specifies the base
377 * register. Again, [EBP] is missing, replaced by a pure
378 * disp32 (this time that's mod=0,rm=*5*) in 32-bit mode,
379 * and RIP-relative addressing in 64-bit mode.
381 * However, rm=4
382 * indicates not a single base register, but instead the
383 * presence of a SIB byte...
385 int a64 = asize == 64;
387 op->indexreg = -1;
389 if (a64)
390 op->basereg = nasm_rd_reg64[rm | ((rex & REX_B) ? 8 : 0)];
391 else
392 op->basereg = nasm_rd_reg32[rm | ((rex & REX_B) ? 8 : 0)];
394 if (rm == 5 && mod == 0) {
395 if (segsize == 64) {
396 op->eaflags |= EAF_REL;
397 op->segment |= SEG_RELATIVE;
400 if (asize != 64)
401 op->disp_size = asize;
403 op->basereg = -1;
404 mod = 2; /* fake disp32 */
408 if (rm == 4) { /* process SIB */
409 uint8_t vsib_hi = 0;
410 scale = (sib >> 6) & 03;
411 index = (sib >> 3) & 07;
412 base = sib & 07;
414 op->scale = 1 << scale;
416 if (segsize == 64) {
417 vsib_hi = (rex & REX_X ? 8 : 0) |
418 (evex[2] & EVEX_P2VP ? 0 : 16);
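                /*
                 * For a VSIB index, REX.X supplies bit 3 and the inverted
                 * EVEX.V' bit supplies bit 4 of the register number, so
                 * gathers/scatters can use any of the 32 vector registers.
                 */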
421 if (type == EA_XMMVSIB)
422 op->indexreg = nasm_rd_xmmreg[index | vsib_hi];
423 else if (type == EA_YMMVSIB)
424 op->indexreg = nasm_rd_ymmreg[index | vsib_hi];
425 else if (type == EA_ZMMVSIB)
426 op->indexreg = nasm_rd_zmmreg[index | vsib_hi];
427 else if (index == 4 && !(rex & REX_X))
428 op->indexreg = -1; /* ESP/RSP cannot be an index */
429 else if (a64)
430 op->indexreg = nasm_rd_reg64[index | ((rex & REX_X) ? 8 : 0)];
431 else
432 op->indexreg = nasm_rd_reg32[index | ((rex & REX_X) ? 8 : 0)];
434 if (base == 5 && mod == 0) {
435 op->basereg = -1;
436 mod = 2; /* Fake disp32 */
437 } else if (a64)
438 op->basereg = nasm_rd_reg64[base | ((rex & REX_B) ? 8 : 0)];
439 else
440 op->basereg = nasm_rd_reg32[base | ((rex & REX_B) ? 8 : 0)];
442 if (segsize == 16)
443 op->disp_size = 32;
444 } else if (type != EA_SCALAR) {
445 /* Can't have VSIB without SIB */
446 return NULL;
449 switch (mod) {
450 case 0:
451 op->segment |= SEG_NODISP;
452 break;
453 case 1:
454 op->segment |= SEG_DISP8;
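            /*
             * AVX-512 compressed displacement: when an EVEX tuple type is
             * in effect the stored disp8 is implicitly scaled by the
             * tuple-dependent factor N returned by get_disp8N().
             */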
455 if (ins->evex_tuple != 0) {
456 op->offset = gets8(data) * get_disp8N(ins);
457 } else {
458 op->offset = gets8(data);
460 data++;
461 break;
462 case 2:
463 op->segment |= SEG_DISP32;
464 op->offset = gets32(data);
465 data += 4;
466 break;
468 return data;
473 * Determine whether the instruction template in t corresponds to the data
474 * stream in data. Return the number of bytes matched if so.
476 #define case4(x) case (x): case (x)+1: case (x)+2: case (x)+3
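/*
 * Template byte-codes carry the operand slot they act on in their low two
 * bits, so case4() expands a single case label into the four consecutive
 * code values covering operands 0..3.
 */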
478 static int matches(const struct itemplate *t, uint8_t *data,
479 const struct prefix_info *prefix, int segsize, insn *ins)
481 uint8_t *r = (uint8_t *)(t->code);
482 uint8_t *origdata = data;
483 bool a_used = false, o_used = false;
484 enum prefixes drep = 0;
485 enum prefixes dwait = 0;
486 uint8_t lock = prefix->lock;
487 int osize = prefix->osize;
488 int asize = prefix->asize;
489 int i, c;
490 int op1, op2;
491 struct operand *opx, *opy;
492 uint8_t opex = 0;
493 bool vex_ok = false;
494 int regmask = (segsize == 64) ? 15 : 7;
495 enum ea_type eat = EA_SCALAR;
497 for (i = 0; i < MAX_OPERANDS; i++) {
498 ins->oprs[i].segment = ins->oprs[i].disp_size =
499 (segsize == 64 ? SEG_64BIT : segsize == 32 ? SEG_32BIT : 0);
501 ins->condition = -1;
502 ins->evex_tuple = 0;
503 ins->rex = prefix->rex;
504 memset(ins->prefixes, 0, sizeof ins->prefixes);
506 if (itemp_has(t, (segsize == 64 ? IF_NOLONG : IF_LONG)))
507 return 0;
509 if (prefix->rep == 0xF2)
510 drep = (itemp_has(t, IF_BND) ? P_BND : P_REPNE);
511 else if (prefix->rep == 0xF3)
512 drep = P_REP;
514 dwait = prefix->wait ? P_WAIT : 0;
516 while ((c = *r++) != 0) {
517 op1 = (c & 3) + ((opex & 1) << 2);
518 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
519 opx = &ins->oprs[op1];
520 opy = &ins->oprs[op2];
521 opex = 0;
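        /*
         * Codes 05, 06 and 07 set opex, whose low bits add 4 to op1/op2
         * when the next code byte is decoded, letting a template refer to
         * operands beyond the first four.
         */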
523 switch (c) {
524 case 01:
525 case 02:
526 case 03:
527 case 04:
528 while (c--)
529 if (*r++ != *data++)
530 return 0;
531 break;
533 case 05:
534 case 06:
535 case 07:
536 opex = c;
537 break;
539 case4(010):
541 int t = *r++, d = *data++;
542 if (d < t || d > t + 7)
543 return 0;
544 else {
545 opx->basereg = (d-t)+
546 (ins->rex & REX_B ? 8 : 0);
547 opx->segment |= SEG_RMREG;
549 break;
552 case4(014):
553 /* this is a separate index reg position of an MIB operand (ICC) */
554 /* Disassembler uses NASM's split EA form only */
555 break;
557 case4(0274):
558 opx->offset = (int8_t)*data++;
559 opx->segment |= SEG_SIGNED;
560 break;
562 case4(020):
563 opx->offset = *data++;
564 break;
566 case4(024):
567 opx->offset = *data++;
568 break;
570 case4(030):
571 opx->offset = getu16(data);
572 data += 2;
573 break;
575 case4(034):
576 if (osize == 32) {
577 opx->offset = getu32(data);
578 data += 4;
579 } else {
580 opx->offset = getu16(data);
581 data += 2;
583 if (segsize != asize)
584 opx->disp_size = asize;
585 break;
587 case4(040):
588 opx->offset = getu32(data);
589 data += 4;
590 break;
592 case4(0254):
593 opx->offset = gets32(data);
594 data += 4;
595 break;
597 case4(044):
598 switch (asize) {
599 case 16:
600 opx->offset = getu16(data);
601 data += 2;
602 if (segsize != 16)
603 opx->disp_size = 16;
604 break;
605 case 32:
606 opx->offset = getu32(data);
607 data += 4;
608 if (segsize == 16)
609 opx->disp_size = 32;
610 break;
611 case 64:
612 opx->offset = getu64(data);
613 opx->disp_size = 64;
614 data += 8;
615 break;
617 break;
619 case4(050):
620 opx->offset = gets8(data++);
621 opx->segment |= SEG_RELATIVE;
622 break;
624 case4(054):
625 opx->offset = getu64(data);
626 data += 8;
627 break;
629 case4(060):
630 opx->offset = gets16(data);
631 data += 2;
632 opx->segment |= SEG_RELATIVE;
633 opx->segment &= ~SEG_32BIT;
634 break;
636 case4(064): /* rel */
637 opx->segment |= SEG_RELATIVE;
638 /* In long mode rel is always 32 bits, sign extended. */
639 if (segsize == 64 || osize == 32) {
640 opx->offset = gets32(data);
641 data += 4;
642 if (segsize != 64)
643 opx->segment |= SEG_32BIT;
644 opx->type = (opx->type & ~SIZE_MASK)
645 | (segsize == 64 ? BITS64 : BITS32);
646 } else {
647 opx->offset = gets16(data);
648 data += 2;
649 opx->segment &= ~SEG_32BIT;
650 opx->type = (opx->type & ~SIZE_MASK) | BITS16;
652 break;
654 case4(070):
655 opx->offset = gets32(data);
656 data += 4;
657 opx->segment |= SEG_32BIT | SEG_RELATIVE;
658 break;
660 case4(0100):
661 case4(0110):
662 case4(0120):
663 case4(0130):
665 int modrm = *data++;
666 opx->segment |= SEG_RMREG;
667 data = do_ea(data, modrm, asize, segsize, eat, opy, ins);
668 if (!data)
669 return 0;
670 opx->basereg = ((modrm >> 3) & 7) + (ins->rex & REX_R ? 8 : 0);
671 if ((ins->rex & REX_EV) && (segsize == 64))
672 opx->basereg += (ins->evex_p[0] & EVEX_P0RP ? 0 : 16);
673 break;
676 case 0172:
678 uint8_t ximm = *data++;
679 c = *r++;
680 ins->oprs[c >> 3].basereg = (ximm >> 4) & regmask;
681 ins->oprs[c >> 3].segment |= SEG_RMREG;
682 ins->oprs[c & 7].offset = ximm & 15;
684 break;
686 case 0173:
688 uint8_t ximm = *data++;
689 c = *r++;
691 if ((c ^ ximm) & 15)
692 return 0;
694 ins->oprs[c >> 4].basereg = (ximm >> 4) & regmask;
695 ins->oprs[c >> 4].segment |= SEG_RMREG;
697 break;
699 case4(0174):
701 uint8_t ximm = *data++;
703 opx->basereg = (ximm >> 4) & regmask;
704 opx->segment |= SEG_RMREG;
706 break;
708 case4(0200):
709 case4(0204):
710 case4(0210):
711 case4(0214):
712 case4(0220):
713 case4(0224):
714 case4(0230):
715 case4(0234):
717 int modrm = *data++;
718 if (((modrm >> 3) & 07) != (c & 07))
719 return 0; /* spare field doesn't match up */
720 data = do_ea(data, modrm, asize, segsize, eat, opy, ins);
721 if (!data)
722 return 0;
723 break;
726 case4(0240):
727 case 0250:
729 uint8_t evexm = *r++;
730 uint8_t evexwlp = *r++;
731 uint8_t modrm, valid_mask;
732 ins->evex_tuple = *r++ - 0300;
733 modrm = *(origdata + 1);
735 ins->rex |= REX_EV;
736 if ((prefix->rex & (REX_EV|REX_V|REX_P)) != REX_EV)
737 return 0;
739 if ((evexm & 0x1f) != prefix->vex_m)
740 return 0;
742 switch (evexwlp & 060) {
743 case 000:
744 if (prefix->rex & REX_W)
745 return 0;
746 break;
747 case 020:
748 if (!(prefix->rex & REX_W))
749 return 0;
750 ins->rex |= REX_W;
751 break;
752 case 040: /* VEX.W is a don't care */
753 ins->rex &= ~REX_W;
754 break;
755 case 060:
756 break;
759 /* If EVEX.b is set with reg-reg op,
760 * EVEX.L'L contains embedded rounding control info
762 if ((prefix->evex[2] & EVEX_P2B) && ((modrm >> 6) == 3)) {
763 valid_mask = 0x3; /* prefix only */
764 } else {
765 valid_mask = 0xf; /* vector length and prefix */
767 if ((evexwlp ^ prefix->vex_lp) & valid_mask)
768 return 0;
770 if (c == 0250) {
771 if ((prefix->vex_v != 0) ||
772 (!(prefix->evex[2] & EVEX_P2VP) &&
773 ((eat < EA_XMMVSIB) || (eat > EA_ZMMVSIB))))
774 return 0;
775 } else {
776 opx->segment |= SEG_RMREG;
777 opx->basereg = ((~prefix->evex[2] & EVEX_P2VP) << (4 - 3) ) |
778 prefix->vex_v;
780 vex_ok = true;
781 memcpy(ins->evex_p, prefix->evex, 3);
782 break;
785 case4(0260):
786 case 0270:
788 int vexm = *r++;
789 int vexwlp = *r++;
791 ins->rex |= REX_V;
792 if ((prefix->rex & (REX_V|REX_P)) != REX_V)
793 return 0;
795 if ((vexm & 0x1f) != prefix->vex_m)
796 return 0;
798 switch (vexwlp & 060) {
799 case 000:
800 if (prefix->rex & REX_W)
801 return 0;
802 break;
803 case 020:
804 if (!(prefix->rex & REX_W))
805 return 0;
806 ins->rex &= ~REX_W;
807 break;
808 case 040: /* VEX.W is a don't care */
809 ins->rex &= ~REX_W;
810 break;
811 case 060:
812 break;
815 /* The 010 bit of vexwlp is set if VEX.L is ignored */
816 if ((vexwlp ^ prefix->vex_lp) & ((vexwlp & 010) ? 03 : 07))
817 return 0;
819 if (c == 0270) {
820 if (prefix->vex_v != 0)
821 return 0;
822 } else {
823 opx->segment |= SEG_RMREG;
824 opx->basereg = prefix->vex_v;
826 vex_ok = true;
827 break;
830 case 0271:
831 if (prefix->rep == 0xF3)
832 drep = P_XRELEASE;
833 break;
835 case 0272:
836 if (prefix->rep == 0xF2)
837 drep = P_XACQUIRE;
838 else if (prefix->rep == 0xF3)
839 drep = P_XRELEASE;
840 break;
842 case 0273:
843 if (prefix->lock == 0xF0) {
844 if (prefix->rep == 0xF2)
845 drep = P_XACQUIRE;
846 else if (prefix->rep == 0xF3)
847 drep = P_XRELEASE;
849 break;
851 case 0310:
852 if (asize != 16)
853 return 0;
854 else
855 a_used = true;
856 break;
858 case 0311:
859 if (asize != 32)
860 return 0;
861 else
862 a_used = true;
863 break;
865 case 0312:
866 if (asize != segsize)
867 return 0;
868 else
869 a_used = true;
870 break;
872 case 0313:
873 if (asize != 64)
874 return 0;
875 else
876 a_used = true;
877 break;
879 case 0314:
880 if (prefix->rex & REX_B)
881 return 0;
882 break;
884 case 0315:
885 if (prefix->rex & REX_X)
886 return 0;
887 break;
889 case 0316:
890 if (prefix->rex & REX_R)
891 return 0;
892 break;
894 case 0317:
895 if (prefix->rex & REX_W)
896 return 0;
897 break;
899 case 0320:
900 if (osize != 16)
901 return 0;
902 else
903 o_used = true;
904 break;
906 case 0321:
907 if (osize != 32)
908 return 0;
909 else
910 o_used = true;
911 break;
913 case 0322:
914 if (osize != (segsize == 16 ? 16 : 32))
915 return 0;
916 else
917 o_used = true;
918 break;
920 case 0323:
921 ins->rex |= REX_W; /* 64-bit only instruction */
922 osize = 64;
923 o_used = true;
924 break;
926 case 0324:
927 if (osize != 64)
928 return 0;
929 o_used = true;
930 break;
932 case 0325:
933 ins->rex |= REX_NH;
934 break;
936 case 0330:
938 int t = *r++, d = *data++;
939 if (d < t || d > t + 15)
940 return 0;
941 else
942 ins->condition = d - t;
943 break;
946 case 0326:
947 if (prefix->rep == 0xF3)
948 return 0;
949 break;
951 case 0331:
952 if (prefix->rep)
953 return 0;
954 break;
956 case 0332:
957 if (prefix->rep != 0xF2)
958 return 0;
959 drep = 0;
960 break;
962 case 0333:
963 if (prefix->rep != 0xF3)
964 return 0;
965 drep = 0;
966 break;
968 case 0334:
969 if (lock) {
970 ins->rex |= REX_R;
971 lock = 0;
973 break;
975 case 0335:
976 if (drep == P_REP)
977 drep = P_REPE;
978 break;
980 case 0336:
981 case 0337:
982 break;
984 case 0340:
985 return 0;
987 case 0341:
988 if (prefix->wait != 0x9B)
989 return 0;
990 dwait = 0;
991 break;
993 case 0360:
994 if (prefix->osp || prefix->rep)
995 return 0;
996 break;
998 case 0361:
999 if (!prefix->osp || prefix->rep)
1000 return 0;
1001 o_used = true;
1002 break;
1004 case 0364:
1005 if (prefix->osp)
1006 return 0;
1007 break;
1009 case 0365:
1010 if (prefix->asp)
1011 return 0;
1012 break;
1014 case 0366:
1015 if (!prefix->osp)
1016 return 0;
1017 o_used = true;
1018 break;
1020 case 0367:
1021 if (!prefix->asp)
1022 return 0;
1023 a_used = true;
1024 break;
1026 case 0370:
1027 case 0371:
1028 break;
1030 case 0374:
1031 eat = EA_XMMVSIB;
1032 break;
1034 case 0375:
1035 eat = EA_YMMVSIB;
1036 break;
1038 case 0376:
1039 eat = EA_ZMMVSIB;
1040 break;
1042 default:
1043 return 0; /* Unknown code */
1047 if (!vex_ok && (ins->rex & (REX_V | REX_EV)))
1048 return 0;
1050 /* REX cannot be combined with VEX */
1051 if ((ins->rex & REX_V) && (prefix->rex & REX_P))
1052 return 0;
1055 * Check for unused rep or a/o prefixes.
1057 for (i = 0; i < t->operands; i++) {
1058 if (ins->oprs[i].segment != SEG_RMREG)
1059 a_used = true;
1062 if (lock) {
1063 if (ins->prefixes[PPS_LOCK])
1064 return 0;
1065 ins->prefixes[PPS_LOCK] = P_LOCK;
1067 if (drep) {
1068 if (ins->prefixes[PPS_REP])
1069 return 0;
1070 ins->prefixes[PPS_REP] = drep;
1072 ins->prefixes[PPS_WAIT] = dwait;
1073 if (!o_used) {
1074 if (osize != ((segsize == 16) ? 16 : 32)) {
1075 enum prefixes pfx = 0;
1077 switch (osize) {
1078 case 16:
1079 pfx = P_O16;
1080 break;
1081 case 32:
1082 pfx = P_O32;
1083 break;
1084 case 64:
1085 pfx = P_O64;
1086 break;
1089 if (ins->prefixes[PPS_OSIZE])
1090 return 0;
1091 ins->prefixes[PPS_OSIZE] = pfx;
1094 if (!a_used && asize != segsize) {
1095 if (ins->prefixes[PPS_ASIZE])
1096 return 0;
1097 ins->prefixes[PPS_ASIZE] = asize == 16 ? P_A16 : P_A32;
1100 /* Fix: check for redundant REX prefixes */
1102 return data - origdata;
1105 /* Condition names for disassembly, sorted by x86 code */
1106 static const char * const condition_name[16] = {
1107 "o", "no", "c", "nc", "z", "nz", "na", "a",
1108 "s", "ns", "pe", "po", "l", "nl", "ng", "g"
1111 int32_t disasm(uint8_t *data, char *output, int outbufsize, int segsize,
1112 int64_t offset, int autosync, iflag_t *prefer)
1114 const struct itemplate * const *p, * const *best_p;
1115 const struct disasm_index *ix;
1116 uint8_t *dp;
1117 int length, best_length = 0;
1118 char *segover;
1119 int i, slen, colon, n;
1120 uint8_t *origdata;
1121 int works;
1122 insn tmp_ins, ins;
1123 iflag_t goodness, best;
1124 int best_pref;
1125 struct prefix_info prefix;
1126 bool end_prefix;
1127 bool is_evex;
1129 memset(&ins, 0, sizeof ins);
1132 * Scan for prefixes.
1134 memset(&prefix, 0, sizeof prefix);
1135 prefix.asize = segsize;
1136 prefix.osize = (segsize == 64) ? 32 : segsize;
1137 segover = NULL;
1138 origdata = data;
1140 ix = itable;
1142 end_prefix = false;
1143 while (!end_prefix) {
1144 switch (*data) {
1145 case 0xF2:
1146 case 0xF3:
1147 prefix.rep = *data++;
1148 break;
1150 case 0x9B:
1151 prefix.wait = *data++;
1152 break;
1154 case 0xF0:
1155 prefix.lock = *data++;
1156 break;
1158 case 0x2E:
1159 segover = "cs", prefix.seg = *data++;
1160 break;
1161 case 0x36:
1162 segover = "ss", prefix.seg = *data++;
1163 break;
1164 case 0x3E:
1165 segover = "ds", prefix.seg = *data++;
1166 break;
1167 case 0x26:
1168 segover = "es", prefix.seg = *data++;
1169 break;
1170 case 0x64:
1171 segover = "fs", prefix.seg = *data++;
1172 break;
1173 case 0x65:
1174 segover = "gs", prefix.seg = *data++;
1175 break;
1177 case 0x66:
1178 prefix.osize = (segsize == 16) ? 32 : 16;
1179 prefix.osp = *data++;
1180 break;
1181 case 0x67:
1182 prefix.asize = (segsize == 32) ? 16 : 32;
1183 prefix.asp = *data++;
1184 break;
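        /*
         * Outside 64-bit mode, C4h/C5h are the LDS/LES opcodes, which can
         * never take a mod=3 (register) memory operand; a second byte with
         * its top two bits set therefore marks a VEX prefix instead.
         */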
1186 case 0xC4:
1187 case 0xC5:
1188 if (segsize == 64 || (data[1] & 0xc0) == 0xc0) {
1189 prefix.vex[0] = *data++;
1190 prefix.vex[1] = *data++;
1192 prefix.rex = REX_V;
1193 prefix.vex_c = RV_VEX;
1195 if (prefix.vex[0] == 0xc4) {
1196 prefix.vex[2] = *data++;
1197 prefix.rex |= (~prefix.vex[1] >> 5) & 7; /* REX_RXB */
1198 prefix.rex |= (prefix.vex[2] >> (7-3)) & REX_W;
1199 prefix.vex_m = prefix.vex[1] & 0x1f;
1200 prefix.vex_v = (~prefix.vex[2] >> 3) & 15;
1201 prefix.vex_lp = prefix.vex[2] & 7;
1202 } else {
1203 prefix.rex |= (~prefix.vex[1] >> (7-2)) & REX_R;
1204 prefix.vex_m = 1;
1205 prefix.vex_v = (~prefix.vex[1] >> 3) & 15;
1206 prefix.vex_lp = prefix.vex[1] & 7;
1209 ix = itable_vex[RV_VEX][prefix.vex_m][prefix.vex_lp & 3];
1211 end_prefix = true;
1212 break;
1214 case 0x62:
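            /*
             * Outside 64-bit mode, 62h is the BOUND opcode, which likewise
             * never takes a mod=3 operand, so the same test distinguishes
             * the four-byte EVEX prefix from it.
             */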
1216 if (segsize == 64 || ((data[1] & 0xc0) == 0xc0)) {
1217 data++; /* 62h EVEX prefix */
1218 prefix.evex[0] = *data++;
1219 prefix.evex[1] = *data++;
1220 prefix.evex[2] = *data++;
1222 prefix.rex = REX_EV;
1223 prefix.vex_c = RV_EVEX;
1224 prefix.rex |= (~prefix.evex[0] >> 5) & 7; /* REX_RXB */
1225 prefix.rex |= (prefix.evex[1] >> (7-3)) & REX_W;
1226 prefix.vex_m = prefix.evex[0] & EVEX_P0MM;
1227 prefix.vex_v = (~prefix.evex[1] & EVEX_P1VVVV) >> 3;
1228 prefix.vex_lp = ((prefix.evex[2] & EVEX_P2LL) >> (5-2)) |
1229 (prefix.evex[1] & EVEX_P1PP);
1231 ix = itable_vex[prefix.vex_c][prefix.vex_m][prefix.vex_lp & 3];
1233 end_prefix = true;
1234 break;
1237 case 0x8F:
1238 if ((data[1] & 030) != 0 &&
1239 (segsize == 64 || (data[1] & 0xc0) == 0xc0)) {
1240 prefix.vex[0] = *data++;
1241 prefix.vex[1] = *data++;
1242 prefix.vex[2] = *data++;
1244 prefix.rex = REX_V;
1245 prefix.vex_c = RV_XOP;
1247 prefix.rex |= (~prefix.vex[1] >> 5) & 7; /* REX_RXB */
1248 prefix.rex |= (prefix.vex[2] >> (7-3)) & REX_W;
1249 prefix.vex_m = prefix.vex[1] & 0x1f;
1250 prefix.vex_v = (~prefix.vex[2] >> 3) & 15;
1251 prefix.vex_lp = prefix.vex[2] & 7;
1253 ix = itable_vex[RV_XOP][prefix.vex_m][prefix.vex_lp & 3];
1255 end_prefix = true;
1256 break;
1258 case REX_P + 0x0:
1259 case REX_P + 0x1:
1260 case REX_P + 0x2:
1261 case REX_P + 0x3:
1262 case REX_P + 0x4:
1263 case REX_P + 0x5:
1264 case REX_P + 0x6:
1265 case REX_P + 0x7:
1266 case REX_P + 0x8:
1267 case REX_P + 0x9:
1268 case REX_P + 0xA:
1269 case REX_P + 0xB:
1270 case REX_P + 0xC:
1271 case REX_P + 0xD:
1272 case REX_P + 0xE:
1273 case REX_P + 0xF:
1274 if (segsize == 64) {
1275 prefix.rex = *data++;
1276 if (prefix.rex & REX_W)
1277 prefix.osize = 64;
1279 end_prefix = true;
1280 break;
1282 default:
1283 end_prefix = true;
1284 break;
1288 iflag_set_all(&best); /* Worst possible */
1289 best_p = NULL;
1290 best_pref = INT_MAX;
1292 if (!ix)
1293 return 0; /* No instruction table at all... */
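    /*
     * Walk the opcode dispatch tree: the first opcode byte indexes the
     * root table (or the VEX/EVEX/XOP table selected above), and an entry
     * with n == -1 is an interior node whose p points at a further table
     * indexed by the next opcode byte.
     */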
1295 dp = data;
1296 ix += *dp++;
1297 while (ix->n == -1) {
1298 ix = (const struct disasm_index *)ix->p + *dp++;
1301 p = (const struct itemplate * const *)ix->p;
1302 for (n = ix->n; n; n--, p++) {
1303 if ((length = matches(*p, data, &prefix, segsize, &tmp_ins))) {
1304 works = true;
1306 * Final check to make sure the types of r/m match up.
1307 * XXX: Need to make sure this is actually correct.
1309 for (i = 0; i < (*p)->operands; i++) {
1310 if (
1311 /* If it's a mem-only EA but we have a
1312 register, die. */
1313 ((tmp_ins.oprs[i].segment & SEG_RMREG) &&
1314 is_class(MEMORY, (*p)->opd[i])) ||
1315 /* If it's a reg-only EA but we have a memory
1316 ref, die. */
1317 (!(tmp_ins.oprs[i].segment & SEG_RMREG) &&
1318 !(REG_EA & ~(*p)->opd[i]) &&
1319 !((*p)->opd[i] & REG_SMASK)) ||
1320 /* Register type mismatch (eg FS vs REG_DESS):
1321 die. */
1322 ((((*p)->opd[i] & (REGISTER | FPUREG)) ||
1323 (tmp_ins.oprs[i].segment & SEG_RMREG)) &&
1324 !whichreg((*p)->opd[i],
1325 tmp_ins.oprs[i].basereg, tmp_ins.rex))
1327 works = false;
1328 break;
1333 * Note: we always prefer instructions which incorporate
1334 * prefixes in the instructions themselves. This is to allow
1335 * e.g. PAUSE to be preferred to REP NOP, and deal with
1336 * MMX/SSE instructions where prefixes are used to select
1337 * between MMX and SSE register sets or outright opcode
1338 * selection.
1340 if (works) {
1341 int i, nprefix;
1342 goodness = iflag_pfmask(*p);
1343 goodness = iflag_xor(&goodness, prefer);
1344 nprefix = 0;
1345 for (i = 0; i < MAXPREFIX; i++)
1346 if (tmp_ins.prefixes[i])
1347 nprefix++;
1348 if (nprefix < best_pref ||
1349 (nprefix == best_pref &&
1350 iflag_cmp(&goodness, &best) < 0)) {
1351 /* This is the best one found so far */
1352 best = goodness;
1353 best_p = p;
1354 best_pref = nprefix;
1355 best_length = length;
1356 ins = tmp_ins;
1362 if (!best_p)
1363 return 0; /* no instruction was matched */
1365 /* Pick the best match */
1366 p = best_p;
1367 length = best_length;
1369 slen = 0;
1371 /* TODO: snprintf returns the value that the string would have if
1372 * the buffer were long enough, and not the actual length of
1373 * the returned string, so each instance of using the return
1374 * value of snprintf should actually be checked to assure that
1375 * the return value is "sane." Maybe a macro wrapper could
1376 * be used for that purpose.
1378 for (i = 0; i < MAXPREFIX; i++) {
1379 const char *prefix = prefix_name(ins.prefixes[i]);
1380 if (prefix)
1381 slen += snprintf(output+slen, outbufsize-slen, "%s ", prefix);
1384 i = (*p)->opcode;
1385 if (i >= FIRST_COND_OPCODE)
1386 slen += snprintf(output + slen, outbufsize - slen, "%s%s",
1387 nasm_insn_names[i], condition_name[ins.condition]);
1388 else
1389 slen += snprintf(output + slen, outbufsize - slen, "%s",
1390 nasm_insn_names[i]);
1392 colon = false;
1393 is_evex = !!(ins.rex & REX_EV);
1394 length += data - origdata; /* fix up for prefixes */
1395 for (i = 0; i < (*p)->operands; i++) {
1396 opflags_t t = (*p)->opd[i];
1397 decoflags_t deco = (*p)->deco[i];
1398 const operand *o = &ins.oprs[i];
1399 int64_t offs;
1401 output[slen++] = (colon ? ':' : i == 0 ? ' ' : ',');
1403 offs = o->offset;
1404 if (o->segment & SEG_RELATIVE) {
1405 offs += offset + length;
1407 * sort out wraparound
1409 if (!(o->segment & (SEG_32BIT|SEG_64BIT)))
1410 offs &= 0xffff;
1411 else if (segsize != 64)
1412 offs &= 0xffffffff;
1415 * add sync marker, if autosync is on
1417 if (autosync)
1418 add_sync(offs, 0L);
1421 if (t & COLON)
1422 colon = true;
1423 else
1424 colon = false;
1426 if ((t & (REGISTER | FPUREG)) ||
1427 (o->segment & SEG_RMREG)) {
1428 enum reg_enum reg;
1429 reg = whichreg(t, o->basereg, ins.rex);
1430 if (t & TO)
1431 slen += snprintf(output + slen, outbufsize - slen, "to ");
1432 slen += snprintf(output + slen, outbufsize - slen, "%s",
1433 nasm_reg_names[reg-EXPR_REG_START]);
1434 if (is_evex && deco)
1435 slen += append_evex_reg_deco(output + slen, outbufsize - slen,
1436 deco, ins.evex_p);
1437 } else if (!(UNITY & ~t)) {
1438 output[slen++] = '1';
1439 } else if (t & IMMEDIATE) {
1440 if (t & BITS8) {
1441 slen +=
1442 snprintf(output + slen, outbufsize - slen, "byte ");
1443 if (o->segment & SEG_SIGNED) {
1444 if (offs < 0) {
1445 offs *= -1;
1446 output[slen++] = '-';
1447 } else
1448 output[slen++] = '+';
1450 } else if (t & BITS16) {
1451 slen +=
1452 snprintf(output + slen, outbufsize - slen, "word ");
1453 } else if (t & BITS32) {
1454 slen +=
1455 snprintf(output + slen, outbufsize - slen, "dword ");
1456 } else if (t & BITS64) {
1457 slen +=
1458 snprintf(output + slen, outbufsize - slen, "qword ");
1459 } else if (t & NEAR) {
1460 slen +=
1461 snprintf(output + slen, outbufsize - slen, "near ");
1462 } else if (t & SHORT) {
1463 slen +=
1464 snprintf(output + slen, outbufsize - slen, "short ");
1466 slen +=
1467 snprintf(output + slen, outbufsize - slen, "0x%"PRIx64"",
1468 offs);
1469 } else if (!(MEM_OFFS & ~t)) {
1470 slen +=
1471 snprintf(output + slen, outbufsize - slen,
1472 "[%s%s%s0x%"PRIx64"]",
1473 (segover ? segover : ""),
1474 (segover ? ":" : ""),
1475 (o->disp_size == 64 ? "qword " :
1476 o->disp_size == 32 ? "dword " :
1477 o->disp_size == 16 ? "word " : ""), offs);
1478 segover = NULL;
1479 } else if (is_class(REGMEM, t)) {
1480 int started = false;
1481 if (t & BITS8)
1482 slen +=
1483 snprintf(output + slen, outbufsize - slen, "byte ");
1484 if (t & BITS16)
1485 slen +=
1486 snprintf(output + slen, outbufsize - slen, "word ");
1487 if (t & BITS32)
1488 slen +=
1489 snprintf(output + slen, outbufsize - slen, "dword ");
1490 if (t & BITS64)
1491 slen +=
1492 snprintf(output + slen, outbufsize - slen, "qword ");
1493 if (t & BITS80)
1494 slen +=
1495 snprintf(output + slen, outbufsize - slen, "tword ");
1496 if ((ins.evex_p[2] & EVEX_P2B) && (deco & BRDCAST_MASK)) {
1497 /* when broadcasting, print the element size, not the full vector size */
1498 if (deco & BR_BITS32)
1499 slen +=
1500 snprintf(output + slen, outbufsize - slen, "dword ");
1501 else if (deco & BR_BITS64)
1502 slen +=
1503 snprintf(output + slen, outbufsize - slen, "qword ");
1504 } else {
1505 if (t & BITS128)
1506 slen +=
1507 snprintf(output + slen, outbufsize - slen, "oword ");
1508 if (t & BITS256)
1509 slen +=
1510 snprintf(output + slen, outbufsize - slen, "yword ");
1511 if (t & BITS512)
1512 slen +=
1513 snprintf(output + slen, outbufsize - slen, "zword ");
1515 if (t & FAR)
1516 slen += snprintf(output + slen, outbufsize - slen, "far ");
1517 if (t & NEAR)
1518 slen +=
1519 snprintf(output + slen, outbufsize - slen, "near ");
1520 output[slen++] = '[';
1521 if (o->disp_size)
1522 slen += snprintf(output + slen, outbufsize - slen, "%s",
1523 (o->disp_size == 64 ? "qword " :
1524 o->disp_size == 32 ? "dword " :
1525 o->disp_size == 16 ? "word " :
1526 ""));
1527 if (o->eaflags & EAF_REL)
1528 slen += snprintf(output + slen, outbufsize - slen, "rel ");
1529 if (segover) {
1530 slen +=
1531 snprintf(output + slen, outbufsize - slen, "%s:",
1532 segover);
1533 segover = NULL;
1535 if (o->basereg != -1) {
1536 slen += snprintf(output + slen, outbufsize - slen, "%s",
1537 nasm_reg_names[(o->basereg-EXPR_REG_START)]);
1538 started = true;
1540 if (o->indexreg != -1 && !itemp_has(*best_p, IF_MIB)) {
1541 if (started)
1542 output[slen++] = '+';
1543 slen += snprintf(output + slen, outbufsize - slen, "%s",
1544 nasm_reg_names[(o->indexreg-EXPR_REG_START)]);
1545 if (o->scale > 1)
1546 slen +=
1547 snprintf(output + slen, outbufsize - slen, "*%d",
1548 o->scale);
1549 started = true;
1553 if (o->segment & SEG_DISP8) {
1554 if (is_evex) {
1555 const char *prefix;
1556 uint32_t offset = offs;
1557 if ((int32_t)offset < 0) {
1558 prefix = "-";
1559 offset = -offset;
1560 } else {
1561 prefix = "+";
1563 slen +=
1564 snprintf(output + slen, outbufsize - slen, "%s0x%"PRIx32"",
1565 prefix, offset);
1566 } else {
1567 const char *prefix;
1568 uint8_t offset = offs;
1569 if ((int8_t)offset < 0) {
1570 prefix = "-";
1571 offset = -offset;
1572 } else {
1573 prefix = "+";
1575 slen +=
1576 snprintf(output + slen, outbufsize - slen, "%s0x%"PRIx8"",
1577 prefix, offset);
1579 } else if (o->segment & SEG_DISP16) {
1580 const char *prefix;
1581 uint16_t offset = offs;
1582 if ((int16_t)offset < 0 && started) {
1583 offset = -offset;
1584 prefix = "-";
1585 } else {
1586 prefix = started ? "+" : "";
1588 slen +=
1589 snprintf(output + slen, outbufsize - slen,
1590 "%s0x%"PRIx16"", prefix, offset);
1591 } else if (o->segment & SEG_DISP32) {
1592 if (prefix.asize == 64) {
1593 const char *prefix;
1594 uint64_t offset = offs;
1595 if ((int32_t)offs < 0 && started) {
1596 offset = -offset;
1597 prefix = "-";
1598 } else {
1599 prefix = started ? "+" : "";
1601 slen +=
1602 snprintf(output + slen, outbufsize - slen,
1603 "%s0x%"PRIx64"", prefix, offset);
1604 } else {
1605 const char *prefix;
1606 uint32_t offset = offs;
1607 if ((int32_t) offset < 0 && started) {
1608 offset = -offset;
1609 prefix = "-";
1610 } else {
1611 prefix = started ? "+" : "";
1613 slen +=
1614 snprintf(output + slen, outbufsize - slen,
1615 "%s0x%"PRIx32"", prefix, offset);
1619 if (o->indexreg != -1 && itemp_has(*best_p, IF_MIB)) {
1620 output[slen++] = ',';
1621 slen += snprintf(output + slen, outbufsize - slen, "%s",
1622 nasm_reg_names[(o->indexreg-EXPR_REG_START)]);
1623 if (o->scale > 1)
1624 slen +=
1625 snprintf(output + slen, outbufsize - slen, "*%d",
1626 o->scale);
1627 started = true;
1630 output[slen++] = ']';
1632 if (is_evex && deco)
1633 slen += append_evex_mem_deco(output + slen, outbufsize - slen,
1634 t, deco, ins.evex_p);
1635 } else {
1636 slen +=
1637 snprintf(output + slen, outbufsize - slen, "<operand%d>", i);
1641 output[slen] = '\0';
1642 if (segover) { /* unused segment override */
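        /*
         * Shift the entire output string, including its terminating NUL,
         * right by three characters, then prepend the two-letter segment
         * name and a space.
         */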
1643 char *p = output;
1644 int count = slen + 1;
1645 while (count--)
1646 p[count + 3] = p[count];
1647 strncpy(output, segover, 2);
1648 output[2] = ' ';
1650 return length;
1654 * This is called when we don't have a complete instruction. If it
1655 * is a standalone *single-byte* prefix, show it as such; otherwise
1656 * print it as a literal.
1658 int32_t eatbyte(uint8_t *data, char *output, int outbufsize, int segsize)
1660 uint8_t byte = *data;
1661 const char *str = NULL;
1663 switch (byte) {
1664 case 0xF2:
1665 str = "repne";
1666 break;
1667 case 0xF3:
1668 str = "rep";
1669 break;
1670 case 0x9B:
1671 str = "wait";
1672 break;
1673 case 0xF0:
1674 str = "lock";
1675 break;
1676 case 0x2E:
1677 str = "cs";
1678 break;
1679 case 0x36:
1680 str = "ss";
1681 break;
1682 case 0x3E:
1683 str = "ds";
1684 break;
1685 case 0x26:
1686 str = "es";
1687 break;
1688 case 0x64:
1689 str = "fs";
1690 break;
1691 case 0x65:
1692 str = "gs";
1693 break;
1694 case 0x66:
1695 str = (segsize == 16) ? "o32" : "o16";
1696 break;
1697 case 0x67:
1698 str = (segsize == 32) ? "a16" : "a32";
1699 break;
1700 case REX_P + 0x0:
1701 case REX_P + 0x1:
1702 case REX_P + 0x2:
1703 case REX_P + 0x3:
1704 case REX_P + 0x4:
1705 case REX_P + 0x5:
1706 case REX_P + 0x6:
1707 case REX_P + 0x7:
1708 case REX_P + 0x8:
1709 case REX_P + 0x9:
1710 case REX_P + 0xA:
1711 case REX_P + 0xB:
1712 case REX_P + 0xC:
1713 case REX_P + 0xD:
1714 case REX_P + 0xE:
1715 case REX_P + 0xF:
1716 if (segsize == 64) {
1717 snprintf(output, outbufsize, "rex%s%s%s%s%s",
1718 (byte == REX_P) ? "" : ".",
1719 (byte & REX_W) ? "w" : "",
1720 (byte & REX_R) ? "r" : "",
1721 (byte & REX_X) ? "x" : "",
1722 (byte & REX_B) ? "b" : "");
1723 break;
1725 /* else fall through */
1726 default:
1727 snprintf(output, outbufsize, "db 0x%02x", byte);
1728 break;
1731 if (str)
1732 snprintf(output, outbufsize, "%s", str);
1734 return 1;