disasm: Add EVEX decorator syntax
[nasm.git] / assemble.c
blob 00acd208b4a8bbff83704f2ce07f12390a5ca591
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2013 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * the actual codes (C syntax, i.e. octal):
38 * \0 - terminates the code. (Unless it's a literal of course.)
39 * \1..\4 - that many literal bytes follow in the code stream
40 * \5 - add 4 to the primary operand number (b, low octdigit)
41 * \6 - add 4 to the secondary operand number (a, middle octdigit)
42 * \7 - add 4 to both the primary and the secondary operand number
43 * \10..\13 - a literal byte follows in the code stream, to be added
44 * to the register value of operand 0..3
45 * \14..\17 - the position of index register operand in MIB (BND insns)
46 * \20..\23 - a byte immediate operand, from operand 0..3
47 * \24..\27 - a zero-extended byte immediate operand, from operand 0..3
48 * \30..\33 - a word immediate operand, from operand 0..3
49 * \34..\37 - select between \3[0-3] and \4[0-3] depending on 16/32 bit
50 * assembly mode or the operand-size override on the operand
51 * \40..\43 - a long immediate operand, from operand 0..3
52 * \44..\47 - select between \3[0-3], \4[0-3] and \5[4-7]
53 * depending on the address size of the instruction.
54 * \50..\53 - a byte relative operand, from operand 0..3
55 * \54..\57 - a qword immediate operand, from operand 0..3
56 * \60..\63 - a word relative operand, from operand 0..3
57 * \64..\67 - select between \6[0-3] and \7[0-3] depending on 16/32 bit
58 * assembly mode or the operand-size override on the operand
59 * \70..\73 - a long relative operand, from operand 0..3
60 * \74..\77 - a word constant, from the _segment_ part of operand 0..3
61 * \1ab - a ModRM, calculated on EA in operand a, with the spare
62 * field the register value of operand b.
63 * \172\ab - the register number from operand a in bits 7..4, with
64 * the 4-bit immediate from operand b in bits 3..0.
65 * \173\xab - the register number from operand a in bits 7..4, with
66 * the value b in bits 3..0.
67 * \174..\177 - the register number from operand 0..3 in bits 7..4, and
68 * an arbitrary value in bits 3..0 (assembled as zero.)
69 * \2ab - a ModRM, calculated on EA in operand a, with the spare
70 * field equal to digit b.
72 * \240..\243 - this instruction uses EVEX rather than REX or VEX/XOP, with the
73 * V field taken from operand 0..3.
74 * \250 - this instruction uses EVEX rather than REX or VEX/XOP, with the
75 * V field set to 1111b.
76 * EVEX prefixes are followed by the sequence:
77 * \cm\wlp\tup where cm is:
78 * cc 000 0mm
79 * c = 2 for EVEX and m is the legacy escape (0f, 0f38, 0f3a)
80 * and wlp is:
81 * 00 wwl lpp
82 * [l0] ll = 0 (.128, .lz)
83 * [l1] ll = 1 (.256)
84 * [l2] ll = 2 (.512)
85 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
87 * [w0] ww = 0 for W = 0
88 * [w1] ww = 1 for W = 1
89 * [wig] ww = 2 for W don't care (always assembled as 0)
90 * [ww] ww = 3 for W used as REX.W
92 * [p0] pp = 0 for no prefix
93 * [66] pp = 1 for legacy prefix 66
94 * [f3] pp = 2
95 * [f2] pp = 3
97 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
98 * (compressed displacement encoding)
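 *
 * A purely illustrative reading of this sequence (not a byte pattern
 * taken from insns.dat): cm = 0202 (octal) selects EVEX (c = 2) with
 * the 0f38 escape (m = 2, per the standard mm numbering), and
 * wlp = 005 (octal) encodes ww = 0 ([w0]), ll = 1 ([l1], .256) and
 * pp = 1 (66 prefix), following the bit layouts given above.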
100 * \254..\257 - a signed 32-bit operand to be extended to 64 bits.
101 * \260..\263 - this instruction uses VEX/XOP rather than REX, with the
102 * V field taken from operand 0..3.
103 * \270 - this instruction uses VEX/XOP rather than REX, with the
104 * V field set to 1111b.
106 * VEX/XOP prefixes are followed by the sequence:
107 * \tmm\wlp where mm is the M field; and wlp is:
108 * 00 wwl lpp
109 * [l0] ll = 0 for L = 0 (.128, .lz)
110 * [l1] ll = 1 for L = 1 (.256)
111 * [lig] ll = 2 for L don't care (always assembled as 0)
113 * [w0] ww = 0 for W = 0
114 * [w1] ww = 1 for W = 1
115 * [wig] ww = 2 for W don't care (always assembled as 0)
116 * [ww] ww = 3 for W used as REX.W
118 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
120 * \271 - instruction takes XRELEASE (F3) with or without lock
121 * \272 - instruction takes XACQUIRE/XRELEASE with or without lock
122 * \273 - instruction takes XACQUIRE/XRELEASE with lock only
123 * \274..\277 - a byte immediate operand, from operand 0..3, sign-extended
124 * to the operand size (if o16/o32/o64 present) or the bit size
125 * \310 - indicates fixed 16-bit address size, i.e. optional 0x67.
126 * \311 - indicates fixed 32-bit address size, i.e. optional 0x67.
127 * \312 - (disassembler only) invalid with non-default address size.
128 * \313 - indicates fixed 64-bit address size, 0x67 invalid.
129 * \314 - (disassembler only) invalid with REX.B
130 * \315 - (disassembler only) invalid with REX.X
131 * \316 - (disassembler only) invalid with REX.R
132 * \317 - (disassembler only) invalid with REX.W
133 * \320 - indicates fixed 16-bit operand size, i.e. optional 0x66.
134 * \321 - indicates fixed 32-bit operand size, i.e. optional 0x66.
135 * \322 - indicates that this instruction is only valid when the
136 * operand size is the default (instruction to disassembler,
137 * generates no code in the assembler)
138 * \323 - indicates fixed 64-bit operand size, REX on extensions only.
139 * \324 - indicates 64-bit operand size requiring REX prefix.
140 * \325 - instruction which always uses spl/bpl/sil/dil
141 * \326 - instruction not valid with 0xF3 REP prefix. Hint for
142 * disassembler only; for SSE instructions.
143 * \330 - a literal byte follows in the code stream, to be added
144 * to the condition code value of the instruction.
145 * \331 - instruction not valid with REP prefix. Hint for
146 * disassembler only; for SSE instructions.
147 * \332 - REP prefix (0xF2 byte) used as opcode extension.
148 * \333 - REP prefix (0xF3 byte) used as opcode extension.
149 * \334 - LOCK prefix used as REX.R (used in non-64-bit mode)
150 * \335 - disassemble a rep (0xF3 byte) prefix as repe not rep.
151 * \336 - force a REP(E) prefix (0xF3) even if not specified.
152 * \337 - force a REPNE prefix (0xF2) even if not specified.
153 * \336-\337 are still listed as prefixes in the disassembler.
154 * \340 - reserve <operand 0> bytes of uninitialized storage.
155 * Operand 0 had better be a segmentless constant.
156 * \341 - this instruction needs a WAIT "prefix"
157 * \360 - no SSE prefix (== \364\331)
158 * \361 - 66 SSE prefix (== \366\331)
159 * \364 - operand-size prefix (0x66) not permitted
160 * \365 - address-size prefix (0x67) not permitted
161 * \366 - operand-size prefix (0x66) used as opcode extension
162 * \367 - address-size prefix (0x67) used as opcode extension
163 * \370,\371 - match only if operand 0 meets byte jump criteria.
164 * 370 is used for Jcc, 371 is used for JMP.
165 * \373 - assemble 0x03 if bits==16, 0x05 if bits==32;
166 * used for conditional jump over longer jump
167 * \374 - this instruction takes an XMM VSIB memory EA
168 * \375 - this instruction takes a YMM VSIB memory EA
169 * \376 - this instruction takes a ZMM VSIB memory EA
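 *
 * Worked illustration (a hypothetical template, not copied from
 * insns.dat): the code string \1 \xF7 \202 \34 would be read as:
 * emit the literal byte F7h, emit a ModRM built from the EA in
 * operand 0 with the spare field set to 2, then emit a word or dword
 * immediate from operand 0 according to the current operand size.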
172 #include "compiler.h"
174 #include <stdio.h>
175 #include <string.h>
176 #include <inttypes.h>
178 #include "nasm.h"
179 #include "nasmlib.h"
180 #include "assemble.h"
181 #include "insns.h"
182 #include "tables.h"
184 enum match_result {
186 * Matching errors. These should be sorted so that more specific
187 * errors come later in the sequence.
189 MERR_INVALOP,
190 MERR_OPSIZEMISSING,
191 MERR_OPSIZEMISMATCH,
192 MERR_BRNUMMISMATCH,
193 MERR_BADCPU,
194 MERR_BADMODE,
195 MERR_BADHLE,
196 MERR_ENCMISMATCH,
197 MERR_BADBND,
199 * Matching success; the conditional ones first
201 MOK_JUMP, /* Matching OK but needs jmp_match() */
202 MOK_GOOD /* Matching unconditionally OK */
205 typedef struct {
206 enum ea_type type; /* what kind of EA is this? */
207 int sib_present; /* is a SIB byte necessary? */
208 int bytes; /* # of bytes of offset needed */
209 int size; /* lazy - this is sib+bytes+1 */
210 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
211 int8_t disp8; /* compressed displacement for EVEX */
212 } ea;
214 #define GEN_SIB(scale, index, base) \
215 (((scale) << 6) | ((index) << 3) | ((base)))
217 #define GEN_MODRM(mod, reg, rm) \
218 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
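/*
 * For illustration (values chosen arbitrarily): GEN_MODRM(3, 2, 1)
 * yields 0xD1 (mod = 11b, reg = 010b, rm = 001b) and GEN_SIB(2, 3, 5)
 * yields 0x9D (scale = 10b, index = 011b, base = 101b).
 */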
220 static iflags_t cpu; /* cpu level received from nasm.c */
221 static efunc errfunc;
222 static struct ofmt *outfmt;
223 static ListGen *list;
225 static int64_t calcsize(int32_t, int64_t, int, insn *,
226 const struct itemplate *);
227 static void gencode(int32_t segment, int64_t offset, int bits,
228 insn * ins, const struct itemplate *temp,
229 int64_t insn_end);
230 static enum match_result find_match(const struct itemplate **tempp,
231 insn *instruction,
232 int32_t segment, int64_t offset, int bits);
233 static enum match_result matches(const struct itemplate *, insn *, int bits);
234 static opflags_t regflag(const operand *);
235 static int32_t regval(const operand *);
236 static int rexflags(int, opflags_t, int);
237 static int op_rexflags(const operand *, int);
238 static int op_evexflags(const operand *, int, uint8_t);
239 static void add_asp(insn *, int);
241 static enum ea_type process_ea(operand *, ea *, int, int, opflags_t, insn *);
243 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
245 return ins->prefixes[pos] == prefix;
248 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
250 if (ins->prefixes[pos])
251 errfunc(ERR_NONFATAL, "invalid %s prefix",
252 prefix_name(ins->prefixes[pos]));
255 static const char *size_name(int size)
257 switch (size) {
258 case 1:
259 return "byte";
260 case 2:
261 return "word";
262 case 4:
263 return "dword";
264 case 8:
265 return "qword";
266 case 10:
267 return "tword";
268 case 16:
269 return "oword";
270 case 32:
271 return "yword";
272 case 64:
273 return "zword";
274 default:
275 return "???";
279 static void warn_overflow(int pass, int size)
281 errfunc(ERR_WARNING | pass | ERR_WARN_NOV,
282 "%s data exceeds bounds", size_name(size));
285 static void warn_overflow_const(int64_t data, int size)
287 if (overflow_general(data, size))
288 warn_overflow(ERR_PASS1, size);
291 static void warn_overflow_opd(const struct operand *o, int size)
293 if (o->wrt == NO_SEG && o->segment == NO_SEG) {
294 if (overflow_general(o->offset, size))
295 warn_overflow(ERR_PASS2, size);
300 * This routine wraps the real output format's output routine,
301 * in order to pass a copy of the data off to the listing file
302 * generator at the same time.
304 static void out(int64_t offset, int32_t segto, const void *data,
305 enum out_type type, uint64_t size,
306 int32_t segment, int32_t wrt)
308 static int32_t lineno = 0; /* static!!! */
309 static char *lnfname = NULL;
310 uint8_t p[8];
312 if (type == OUT_ADDRESS && segment == NO_SEG && wrt == NO_SEG) {
314 * This is a non-relocated address, and we're going to
315 * convert it into RAWDATA format.
317 uint8_t *q = p;
319 if (size > 8) {
320 errfunc(ERR_PANIC, "OUT_ADDRESS with size > 8");
321 return;
324 WRITEADDR(q, *(int64_t *)data, size);
325 data = p;
326 type = OUT_RAWDATA;
329 list->output(offset, data, type, size);
332 * This call to src_get determines when we call the
333 * debug-format-specific "linenum" function.
334 * It updates lineno and lnfname to the current values,
335 * returning 0 if "same as last time", -2 if lnfname
336 * changed, and otherwise the amount by which lineno
337 * changed. Thus, these variables must be static.
340 if (src_get(&lineno, &lnfname))
341 outfmt->current_dfmt->linenum(lnfname, lineno, segto);
343 outfmt->output(segto, data, type, size, segment, wrt);
346 static void out_imm8(int64_t offset, int32_t segment, struct operand *opx)
348 if (opx->segment != NO_SEG) {
349 uint64_t data = opx->offset;
350 out(offset, segment, &data, OUT_ADDRESS, 1, opx->segment, opx->wrt);
351 } else {
352 uint8_t byte = opx->offset;
353 out(offset, segment, &byte, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
357 static bool jmp_match(int32_t segment, int64_t offset, int bits,
358 insn * ins, const struct itemplate *temp)
360 int64_t isize;
361 const uint8_t *code = temp->code;
362 uint8_t c = code[0];
364 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
365 return false;
366 if (!optimizing)
367 return false;
368 if (optimizing < 0 && c == 0371)
369 return false;
371 isize = calcsize(segment, offset, bits, ins, temp);
373 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
374 /* Be optimistic in pass 1 */
375 return true;
377 if (ins->oprs[0].segment != segment)
378 return false;
380 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
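/*
 * Illustration (assumed values): if the target label lies 100 bytes
 * past the end of the would-be short jump, the delta above is 100,
 * which fits in a signed byte, so the \370/\371 short form matches.
 */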
381 return (isize >= -128 && isize <= 127); /* is it byte size? */
384 int64_t assemble(int32_t segment, int64_t offset, int bits, iflags_t cp,
385 insn * instruction, struct ofmt *output, efunc error,
386 ListGen * listgen)
388 const struct itemplate *temp;
389 int j;
390 enum match_result m;
391 int64_t insn_end;
392 int32_t itimes;
393 int64_t start = offset;
394 int64_t wsize; /* size for DB etc. */
396 errfunc = error; /* to pass to other functions */
397 cpu = cp;
398 outfmt = output; /* likewise */
399 list = listgen; /* and again */
401 wsize = idata_bytes(instruction->opcode);
402 if (wsize == -1)
403 return 0;
405 if (wsize) {
406 extop *e;
407 int32_t t = instruction->times;
408 if (t < 0)
409 errfunc(ERR_PANIC,
410 "instruction->times < 0 (%ld) in assemble()", t);
412 while (t--) { /* repeat TIMES times */
413 list_for_each(e, instruction->eops) {
414 if (e->type == EOT_DB_NUMBER) {
415 if (wsize > 8) {
416 errfunc(ERR_NONFATAL,
417 "integer supplied to a DT, DO or DY"
418 " instruction");
419 } else {
420 out(offset, segment, &e->offset,
421 OUT_ADDRESS, wsize, e->segment, e->wrt);
422 offset += wsize;
424 } else if (e->type == EOT_DB_STRING ||
425 e->type == EOT_DB_STRING_FREE) {
426 int align;
428 out(offset, segment, e->stringval,
429 OUT_RAWDATA, e->stringlen, NO_SEG, NO_SEG);
430 align = e->stringlen % wsize;
432 if (align) {
433 align = wsize - align;
434 out(offset, segment, zero_buffer,
435 OUT_RAWDATA, align, NO_SEG, NO_SEG);
437 offset += e->stringlen + align;
440 if (t > 0 && t == instruction->times - 1) {
442 * Dummy call to list->output to give the offset to the
443 * listing module.
445 list->output(offset, NULL, OUT_RAWDATA, 0);
446 list->uplevel(LIST_TIMES);
449 if (instruction->times > 1)
450 list->downlevel(LIST_TIMES);
451 return offset - start;
454 if (instruction->opcode == I_INCBIN) {
455 const char *fname = instruction->eops->stringval;
456 FILE *fp;
458 fp = fopen(fname, "rb");
459 if (!fp) {
460 error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
461 fname);
462 } else if (fseek(fp, 0L, SEEK_END) < 0) {
463 error(ERR_NONFATAL, "`incbin': unable to seek on file `%s'",
464 fname);
465 fclose(fp);
466 } else {
467 static char buf[4096];
468 size_t t = instruction->times;
469 size_t base = 0;
470 size_t len;
472 len = ftell(fp);
473 if (instruction->eops->next) {
474 base = instruction->eops->next->offset;
475 len -= base;
476 if (instruction->eops->next->next &&
477 len > (size_t)instruction->eops->next->next->offset)
478 len = (size_t)instruction->eops->next->next->offset;
481 * Dummy call to list->output to give the offset to the
482 * listing module.
484 list->output(offset, NULL, OUT_RAWDATA, 0);
485 list->uplevel(LIST_INCBIN);
486 while (t--) {
487 size_t l;
489 fseek(fp, base, SEEK_SET);
490 l = len;
491 while (l > 0) {
492 int32_t m;
493 m = fread(buf, 1, l > sizeof(buf) ? sizeof(buf) : l, fp);
494 if (!m) {
496 * This shouldn't happen unless the file
497 * actually changes while we are reading
498 * it.
500 error(ERR_NONFATAL,
501 "`incbin': unexpected EOF while"
502 " reading file `%s'", fname);
503 t = 0; /* Try to exit cleanly */
504 break;
506 out(offset, segment, buf, OUT_RAWDATA, m,
507 NO_SEG, NO_SEG);
508 l -= m;
511 list->downlevel(LIST_INCBIN);
512 if (instruction->times > 1) {
514 * Dummy call to list->output to give the offset to the
515 * listing module.
517 list->output(offset, NULL, OUT_RAWDATA, 0);
518 list->uplevel(LIST_TIMES);
519 list->downlevel(LIST_TIMES);
521 fclose(fp);
522 return instruction->times * len;
524 return 0; /* if we're here, there's an error */
527 /* Check to see if we need an address-size prefix */
528 add_asp(instruction, bits);
530 m = find_match(&temp, instruction, segment, offset, bits);
532 if (m == MOK_GOOD) {
533 /* Matches! */
534 int64_t insn_size = calcsize(segment, offset, bits, instruction, temp);
535 itimes = instruction->times;
536 if (insn_size < 0) /* shouldn't be, on pass two */
537 error(ERR_PANIC, "errors made it through from pass one");
538 else
539 while (itimes--) {
540 for (j = 0; j < MAXPREFIX; j++) {
541 uint8_t c = 0;
542 switch (instruction->prefixes[j]) {
543 case P_WAIT:
544 c = 0x9B;
545 break;
546 case P_LOCK:
547 c = 0xF0;
548 break;
549 case P_REPNE:
550 case P_REPNZ:
551 case P_XACQUIRE:
552 case P_BND:
553 c = 0xF2;
554 break;
555 case P_REPE:
556 case P_REPZ:
557 case P_REP:
558 case P_XRELEASE:
559 c = 0xF3;
560 break;
561 case R_CS:
562 if (bits == 64) {
563 error(ERR_WARNING | ERR_PASS2,
564 "cs segment base generated, but will be ignored in 64-bit mode");
566 c = 0x2E;
567 break;
568 case R_DS:
569 if (bits == 64) {
570 error(ERR_WARNING | ERR_PASS2,
571 "ds segment base generated, but will be ignored in 64-bit mode");
573 c = 0x3E;
574 break;
575 case R_ES:
576 if (bits == 64) {
577 error(ERR_WARNING | ERR_PASS2,
578 "es segment base generated, but will be ignored in 64-bit mode");
580 c = 0x26;
581 break;
582 case R_FS:
583 c = 0x64;
584 break;
585 case R_GS:
586 c = 0x65;
587 break;
588 case R_SS:
589 if (bits == 64) {
590 error(ERR_WARNING | ERR_PASS2,
591 "ss segment base generated, but will be ignored in 64-bit mode");
593 c = 0x36;
594 break;
595 case R_SEGR6:
596 case R_SEGR7:
597 error(ERR_NONFATAL,
598 "segr6 and segr7 cannot be used as prefixes");
599 break;
600 case P_A16:
601 if (bits == 64) {
602 error(ERR_NONFATAL,
603 "16-bit addressing is not supported "
604 "in 64-bit mode");
605 } else if (bits != 16)
606 c = 0x67;
607 break;
608 case P_A32:
609 if (bits != 32)
610 c = 0x67;
611 break;
612 case P_A64:
613 if (bits != 64) {
614 error(ERR_NONFATAL,
615 "64-bit addressing is only supported "
616 "in 64-bit mode");
618 break;
619 case P_ASP:
620 c = 0x67;
621 break;
622 case P_O16:
623 if (bits != 16)
624 c = 0x66;
625 break;
626 case P_O32:
627 if (bits == 16)
628 c = 0x66;
629 break;
630 case P_O64:
631 /* REX.W */
632 break;
633 case P_OSP:
634 c = 0x66;
635 break;
636 case P_EVEX:
637 /* EVEX */
638 break;
639 case P_none:
640 break;
641 default:
642 error(ERR_PANIC, "invalid instruction prefix");
644 if (c != 0) {
645 out(offset, segment, &c, OUT_RAWDATA, 1,
646 NO_SEG, NO_SEG);
647 offset++;
650 insn_end = offset + insn_size;
651 gencode(segment, offset, bits, instruction,
652 temp, insn_end);
653 offset += insn_size;
654 if (itimes > 0 && itimes == instruction->times - 1) {
656 * Dummy call to list->output to give the offset to the
657 * listing module.
659 list->output(offset, NULL, OUT_RAWDATA, 0);
660 list->uplevel(LIST_TIMES);
663 if (instruction->times > 1)
664 list->downlevel(LIST_TIMES);
665 return offset - start;
666 } else {
667 /* No match */
668 switch (m) {
669 case MERR_OPSIZEMISSING:
670 error(ERR_NONFATAL, "operation size not specified");
671 break;
672 case MERR_OPSIZEMISMATCH:
673 error(ERR_NONFATAL, "mismatch in operand sizes");
674 break;
675 case MERR_BRNUMMISMATCH:
676 error(ERR_NONFATAL,
677 "mismatch in the number of broadcasting elements");
678 break;
679 case MERR_BADCPU:
680 error(ERR_NONFATAL, "no instruction for this cpu level");
681 break;
682 case MERR_BADMODE:
683 error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
684 bits);
685 break;
686 default:
687 error(ERR_NONFATAL,
688 "invalid combination of opcode and operands");
689 break;
692 return 0;
695 int64_t insn_size(int32_t segment, int64_t offset, int bits, iflags_t cp,
696 insn * instruction, efunc error)
698 const struct itemplate *temp;
699 enum match_result m;
701 errfunc = error; /* to pass to other functions */
702 cpu = cp;
704 if (instruction->opcode == I_none)
705 return 0;
707 if (instruction->opcode == I_DB || instruction->opcode == I_DW ||
708 instruction->opcode == I_DD || instruction->opcode == I_DQ ||
709 instruction->opcode == I_DT || instruction->opcode == I_DO ||
710 instruction->opcode == I_DY) {
711 extop *e;
712 int32_t isize, osize, wsize;
714 isize = 0;
715 wsize = idata_bytes(instruction->opcode);
717 list_for_each(e, instruction->eops) {
718 int32_t align;
720 osize = 0;
721 if (e->type == EOT_DB_NUMBER) {
722 osize = 1;
723 warn_overflow_const(e->offset, wsize);
724 } else if (e->type == EOT_DB_STRING ||
725 e->type == EOT_DB_STRING_FREE)
726 osize = e->stringlen;
728 align = (-osize) % wsize;
729 if (align < 0)
730 align += wsize;
731 isize += osize + align;
733 return isize * instruction->times;
736 if (instruction->opcode == I_INCBIN) {
737 const char *fname = instruction->eops->stringval;
738 FILE *fp;
739 int64_t val = 0;
740 size_t len;
742 fp = fopen(fname, "rb");
743 if (!fp)
744 error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
745 fname);
746 else if (fseek(fp, 0L, SEEK_END) < 0)
747 error(ERR_NONFATAL, "`incbin': unable to seek on file `%s'",
748 fname);
749 else {
750 len = ftell(fp);
751 if (instruction->eops->next) {
752 len -= instruction->eops->next->offset;
753 if (instruction->eops->next->next &&
754 len > (size_t)instruction->eops->next->next->offset) {
755 len = (size_t)instruction->eops->next->next->offset;
758 val = instruction->times * len;
760 if (fp)
761 fclose(fp);
762 return val;
765 /* Check to see if we need an address-size prefix */
766 add_asp(instruction, bits);
768 m = find_match(&temp, instruction, segment, offset, bits);
769 if (m == MOK_GOOD) {
770 /* we've matched an instruction. */
771 int64_t isize;
772 int j;
774 isize = calcsize(segment, offset, bits, instruction, temp);
775 if (isize < 0)
776 return -1;
777 for (j = 0; j < MAXPREFIX; j++) {
778 switch (instruction->prefixes[j]) {
779 case P_A16:
780 if (bits != 16)
781 isize++;
782 break;
783 case P_A32:
784 if (bits != 32)
785 isize++;
786 break;
787 case P_O16:
788 if (bits != 16)
789 isize++;
790 break;
791 case P_O32:
792 if (bits == 16)
793 isize++;
794 break;
795 case P_A64:
796 case P_O64:
797 case P_EVEX:
798 case P_none:
799 break;
800 default:
801 isize++;
802 break;
805 return isize * instruction->times;
806 } else {
807 return -1; /* didn't match any instruction */
811 static void bad_hle_warn(const insn * ins, uint8_t hleok)
813 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
814 enum whatwarn { w_none, w_lock, w_inval } ww;
815 static const enum whatwarn warn[2][4] =
817 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
818 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
820 unsigned int n;
822 n = (unsigned int)rep_pfx - P_XACQUIRE;
823 if (n > 1)
824 return; /* Not XACQUIRE/XRELEASE */
826 ww = warn[n][hleok];
827 if (!is_class(MEMORY, ins->oprs[0].type))
828 ww = w_inval; /* HLE requires operand 0 to be memory */
830 switch (ww) {
831 case w_none:
832 break;
834 case w_lock:
835 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
836 errfunc(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
837 "%s with this instruction requires lock",
838 prefix_name(rep_pfx));
840 break;
842 case w_inval:
843 errfunc(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
844 "%s invalid with this instruction",
845 prefix_name(rep_pfx));
846 break;
850 /* Common construct */
851 #define case3(x) case (x): case (x)+1: case (x)+2
852 #define case4(x) case3(x): case (x)+3
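/*
 * E.g. case4(010) expands to "case 010: case 011: case 012: case 013",
 * covering the four operand positions encoded in the low bits.
 */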
854 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
855 insn * ins, const struct itemplate *temp)
857 const uint8_t *codes = temp->code;
858 int64_t length = 0;
859 uint8_t c;
860 int rex_mask = ~0;
861 int op1, op2;
862 struct operand *opx;
863 uint8_t opex = 0;
864 enum ea_type eat;
865 uint8_t hleok = 0;
866 bool lockcheck = true;
867 enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
869 ins->rex = 0; /* Ensure REX is reset */
870 eat = EA_SCALAR; /* Expect a scalar EA */
871 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
873 if (ins->prefixes[PPS_OSIZE] == P_O64)
874 ins->rex |= REX_W;
876 (void)segment; /* Don't warn that this parameter is unused */
877 (void)offset; /* Don't warn that this parameter is unused */
879 while (*codes) {
880 c = *codes++;
881 op1 = (c & 3) + ((opex & 1) << 2);
882 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
883 opx = &ins->oprs[op1];
884 opex = 0; /* For the next iteration */
886 switch (c) {
887 case4(01):
888 codes += c, length += c;
889 break;
891 case3(05):
892 opex = c;
893 break;
895 case4(010):
896 ins->rex |=
897 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
898 codes++, length++;
899 break;
901 case4(014):
902 /* this is an index reg of MIB operand */
903 mib_index = opx->basereg;
904 break;
906 case4(020):
907 case4(024):
908 length++;
909 break;
911 case4(030):
912 length += 2;
913 break;
915 case4(034):
916 if (opx->type & (BITS16 | BITS32 | BITS64))
917 length += (opx->type & BITS16) ? 2 : 4;
918 else
919 length += (bits == 16) ? 2 : 4;
920 break;
922 case4(040):
923 length += 4;
924 break;
926 case4(044):
927 length += ins->addr_size >> 3;
928 break;
930 case4(050):
931 length++;
932 break;
934 case4(054):
935 length += 8; /* MOV reg64/imm */
936 break;
938 case4(060):
939 length += 2;
940 break;
942 case4(064):
943 if (opx->type & (BITS16 | BITS32 | BITS64))
944 length += (opx->type & BITS16) ? 2 : 4;
945 else
946 length += (bits == 16) ? 2 : 4;
947 break;
949 case4(070):
950 length += 4;
951 break;
953 case4(074):
954 length += 2;
955 break;
957 case 0172:
958 case 0173:
959 codes++;
960 length++;
961 break;
963 case4(0174):
964 length++;
965 break;
967 case4(0240):
968 ins->rex |= REX_EV;
969 ins->vexreg = regval(opx);
970 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
971 ins->vex_cm = *codes++;
972 ins->vex_wlp = *codes++;
973 ins->evex_tuple = (*codes++ - 0300);
974 break;
976 case 0250:
977 ins->rex |= REX_EV;
978 ins->vexreg = 0;
979 ins->vex_cm = *codes++;
980 ins->vex_wlp = *codes++;
981 ins->evex_tuple = (*codes++ - 0300);
982 break;
984 case4(0254):
985 length += 4;
986 break;
988 case4(0260):
989 ins->rex |= REX_V;
990 ins->vexreg = regval(opx);
991 ins->vex_cm = *codes++;
992 ins->vex_wlp = *codes++;
993 break;
995 case 0270:
996 ins->rex |= REX_V;
997 ins->vexreg = 0;
998 ins->vex_cm = *codes++;
999 ins->vex_wlp = *codes++;
1000 break;
1002 case3(0271):
1003 hleok = c & 3;
1004 break;
1006 case4(0274):
1007 length++;
1008 break;
1010 case4(0300):
1011 break;
1013 case 0310:
1014 if (bits == 64)
1015 return -1;
1016 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
1017 break;
1019 case 0311:
1020 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1021 break;
1023 case 0312:
1024 break;
1026 case 0313:
1027 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1028 has_prefix(ins, PPS_ASIZE, P_A32))
1029 return -1;
1030 break;
1032 case4(0314):
1033 break;
1035 case 0320:
1037 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1038 if (pfx == P_O16)
1039 break;
1040 if (pfx != P_none)
1041 errfunc(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1042 else
1043 ins->prefixes[PPS_OSIZE] = P_O16;
1044 break;
1047 case 0321:
1049 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1050 if (pfx == P_O32)
1051 break;
1052 if (pfx != P_none)
1053 errfunc(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1054 else
1055 ins->prefixes[PPS_OSIZE] = P_O32;
1056 break;
1059 case 0322:
1060 break;
1062 case 0323:
1063 rex_mask &= ~REX_W;
1064 break;
1066 case 0324:
1067 ins->rex |= REX_W;
1068 break;
1070 case 0325:
1071 ins->rex |= REX_NH;
1072 break;
1074 case 0326:
1075 break;
1077 case 0330:
1078 codes++, length++;
1079 break;
1081 case 0331:
1082 break;
1084 case 0332:
1085 case 0333:
1086 length++;
1087 break;
1089 case 0334:
1090 ins->rex |= REX_L;
1091 break;
1093 case 0335:
1094 break;
1096 case 0336:
1097 if (!ins->prefixes[PPS_REP])
1098 ins->prefixes[PPS_REP] = P_REP;
1099 break;
1101 case 0337:
1102 if (!ins->prefixes[PPS_REP])
1103 ins->prefixes[PPS_REP] = P_REPNE;
1104 break;
1106 case 0340:
1107 if (ins->oprs[0].segment != NO_SEG)
1108 errfunc(ERR_NONFATAL, "attempt to reserve non-constant"
1109 " quantity of BSS space");
1110 else
1111 length += ins->oprs[0].offset;
1112 break;
1114 case 0341:
1115 if (!ins->prefixes[PPS_WAIT])
1116 ins->prefixes[PPS_WAIT] = P_WAIT;
1117 break;
1119 case 0360:
1120 break;
1122 case 0361:
1123 length++;
1124 break;
1126 case 0364:
1127 case 0365:
1128 break;
1130 case 0366:
1131 case 0367:
1132 length++;
1133 break;
1135 case 0370:
1136 case 0371:
1137 break;
1139 case 0373:
1140 length++;
1141 break;
1143 case 0374:
1144 eat = EA_XMMVSIB;
1145 break;
1147 case 0375:
1148 eat = EA_YMMVSIB;
1149 break;
1151 case 0376:
1152 eat = EA_ZMMVSIB;
1153 break;
1155 case4(0100):
1156 case4(0110):
1157 case4(0120):
1158 case4(0130):
1159 case4(0200):
1160 case4(0204):
1161 case4(0210):
1162 case4(0214):
1163 case4(0220):
1164 case4(0224):
1165 case4(0230):
1166 case4(0234):
1168 ea ea_data;
1169 int rfield;
1170 opflags_t rflags;
1171 struct operand *opy = &ins->oprs[op2];
1172 struct operand *op_er_sae;
1174 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1176 if (c <= 0177) {
1177 /* pick rfield from operand b (opx) */
1178 rflags = regflag(opx);
1179 rfield = nasm_regvals[opx->basereg];
1180 } else {
1181 rflags = 0;
1182 rfield = c & 7;
1185 /* EVEX.b1 : evex_brerop contains the operand position */
1186 op_er_sae = (ins->evex_brerop >= 0 ?
1187 &ins->oprs[ins->evex_brerop] : NULL);
1189 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1190 /* set EVEX.b */
1191 ins->evex_p[2] |= EVEX_P2B;
1192 if (op_er_sae->decoflags & ER) {
1193 /* set EVEX.RC (rounding control) */
1194 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1195 & EVEX_P2RC;
1197 } else {
1198 /* set EVEX.L'L (vector length) */
1199 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1200 if (opy->decoflags & BRDCAST_MASK) {
1201 /* set EVEX.b */
1202 ins->evex_p[2] |= EVEX_P2B;
1207 * if a separate form of MIB (ICC style) is used,
1208 * the index reg info is merged into mem operand
1210 if (mib_index != R_none) {
1211 opy->indexreg = mib_index;
1212 opy->scale = 1;
1213 opy->hintbase = mib_index;
1214 opy->hinttype = EAH_NOTBASE;
1218 * only for mib operands, make a single reg index [reg*1].
1219 * gas uses this form to explicitly denote index register.
1221 if ((temp->flags & IF_MIB) &&
1222 (opy->indexreg == -1 && opy->hintbase == opy->basereg &&
1223 opy->hinttype == EAH_NOTBASE)) {
1224 opy->indexreg = opy->basereg;
1225 opy->basereg = -1;
1226 opy->scale = 1;
1229 if (process_ea(opy, &ea_data, bits,
1230 rfield, rflags, ins) != eat) {
1231 errfunc(ERR_NONFATAL, "invalid effective address");
1232 return -1;
1233 } else {
1234 ins->rex |= ea_data.rex;
1235 length += ea_data.size;
1238 break;
1240 default:
1241 errfunc(ERR_PANIC, "internal instruction table corrupt"
1242 ": instruction code \\%o (0x%02X) given", c, c);
1243 break;
1247 ins->rex &= rex_mask;
1249 if (ins->rex & REX_NH) {
1250 if (ins->rex & REX_H) {
1251 errfunc(ERR_NONFATAL, "instruction cannot use high registers");
1252 return -1;
1254 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1257 if (ins->rex & (REX_V | REX_EV)) {
1258 int bad32 = REX_R|REX_W|REX_X|REX_B;
1260 if (ins->rex & REX_H) {
1261 errfunc(ERR_NONFATAL, "cannot use high register in AVX instruction");
1262 return -1;
1264 switch (ins->vex_wlp & 060) {
1265 case 000:
1266 case 040:
1267 ins->rex &= ~REX_W;
1268 break;
1269 case 020:
1270 ins->rex |= REX_W;
1271 bad32 &= ~REX_W;
1272 break;
1273 case 060:
1274 /* Follow REX_W */
1275 break;
1278 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1279 errfunc(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1280 return -1;
1281 } else if (!(ins->rex & REX_EV) &&
1282 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1283 errfunc(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1284 return -1;
1286 if (ins->rex & REX_EV)
1287 length += 4;
1288 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)))
1289 length += 3;
1290 else
1291 length += 2;
1292 } else if (ins->rex & REX_REAL) {
1293 if (ins->rex & REX_H) {
1294 errfunc(ERR_NONFATAL, "cannot use high register in rex instruction");
1295 return -1;
1296 } else if (bits == 64) {
1297 length++;
1298 } else if ((ins->rex & REX_L) &&
1299 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1300 cpu >= IF_X86_64) {
1301 /* LOCK-as-REX.R */
1302 assert_no_prefix(ins, PPS_LOCK);
1303 lockcheck = false; /* Already errored, no need for warning */
1304 length++;
1305 } else {
1306 errfunc(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1307 return -1;
1311 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1312 (!(temp->flags & IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1313 errfunc(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1314 "instruction is not lockable");
1317 bad_hle_warn(ins, hleok);
1319 return length;
1322 static inline unsigned int emit_rex(insn *ins, int32_t segment, int64_t offset, int bits)
1324 if (bits == 64) {
1325 if ((ins->rex & REX_REAL) && !(ins->rex & (REX_V | REX_EV))) {
1326 ins->rex = (ins->rex & REX_REAL) | REX_P;
1327 out(offset, segment, &ins->rex, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1328 ins->rex = 0;
1329 return 1;
1333 return 0;
1336 static void gencode(int32_t segment, int64_t offset, int bits,
1337 insn * ins, const struct itemplate *temp,
1338 int64_t insn_end)
1340 uint8_t c;
1341 uint8_t bytes[4];
1342 int64_t size;
1343 int64_t data;
1344 int op1, op2;
1345 struct operand *opx;
1346 const uint8_t *codes = temp->code;
1347 uint8_t opex = 0;
1348 enum ea_type eat = EA_SCALAR;
1350 while (*codes) {
1351 c = *codes++;
1352 op1 = (c & 3) + ((opex & 1) << 2);
1353 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1354 opx = &ins->oprs[op1];
1355 opex = 0; /* For the next iteration */
1357 switch (c) {
1358 case 01:
1359 case 02:
1360 case 03:
1361 case 04:
1362 offset += emit_rex(ins, segment, offset, bits);
1363 out(offset, segment, codes, OUT_RAWDATA, c, NO_SEG, NO_SEG);
1364 codes += c;
1365 offset += c;
1366 break;
1368 case 05:
1369 case 06:
1370 case 07:
1371 opex = c;
1372 break;
1374 case4(010):
1375 offset += emit_rex(ins, segment, offset, bits);
1376 bytes[0] = *codes++ + (regval(opx) & 7);
1377 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1378 offset += 1;
1379 break;
1381 case4(014):
1382 break;
1384 case4(020):
1385 if (opx->offset < -256 || opx->offset > 255) {
1386 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1387 "byte value exceeds bounds");
1389 out_imm8(offset, segment, opx);
1390 offset += 1;
1391 break;
1393 case4(024):
1394 if (opx->offset < 0 || opx->offset > 255)
1395 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1396 "unsigned byte value exceeds bounds");
1397 out_imm8(offset, segment, opx);
1398 offset += 1;
1399 break;
1401 case4(030):
1402 warn_overflow_opd(opx, 2);
1403 data = opx->offset;
1404 out(offset, segment, &data, OUT_ADDRESS, 2,
1405 opx->segment, opx->wrt);
1406 offset += 2;
1407 break;
1409 case4(034):
1410 if (opx->type & (BITS16 | BITS32))
1411 size = (opx->type & BITS16) ? 2 : 4;
1412 else
1413 size = (bits == 16) ? 2 : 4;
1414 warn_overflow_opd(opx, size);
1415 data = opx->offset;
1416 out(offset, segment, &data, OUT_ADDRESS, size,
1417 opx->segment, opx->wrt);
1418 offset += size;
1419 break;
1421 case4(040):
1422 warn_overflow_opd(opx, 4);
1423 data = opx->offset;
1424 out(offset, segment, &data, OUT_ADDRESS, 4,
1425 opx->segment, opx->wrt);
1426 offset += 4;
1427 break;
1429 case4(044):
1430 data = opx->offset;
1431 size = ins->addr_size >> 3;
1432 warn_overflow_opd(opx, size);
1433 out(offset, segment, &data, OUT_ADDRESS, size,
1434 opx->segment, opx->wrt);
1435 offset += size;
1436 break;
1438 case4(050):
1439 if (opx->segment != segment) {
1440 data = opx->offset;
1441 out(offset, segment, &data,
1442 OUT_REL1ADR, insn_end - offset,
1443 opx->segment, opx->wrt);
1444 } else {
1445 data = opx->offset - insn_end;
1446 if (data > 127 || data < -128)
1447 errfunc(ERR_NONFATAL, "short jump is out of range");
1448 out(offset, segment, &data,
1449 OUT_ADDRESS, 1, NO_SEG, NO_SEG);
1451 offset += 1;
1452 break;
1454 case4(054):
1455 data = (int64_t)opx->offset;
1456 out(offset, segment, &data, OUT_ADDRESS, 8,
1457 opx->segment, opx->wrt);
1458 offset += 8;
1459 break;
1461 case4(060):
1462 if (opx->segment != segment) {
1463 data = opx->offset;
1464 out(offset, segment, &data,
1465 OUT_REL2ADR, insn_end - offset,
1466 opx->segment, opx->wrt);
1467 } else {
1468 data = opx->offset - insn_end;
1469 out(offset, segment, &data,
1470 OUT_ADDRESS, 2, NO_SEG, NO_SEG);
1472 offset += 2;
1473 break;
1475 case4(064):
1476 if (opx->type & (BITS16 | BITS32 | BITS64))
1477 size = (opx->type & BITS16) ? 2 : 4;
1478 else
1479 size = (bits == 16) ? 2 : 4;
1480 if (opx->segment != segment) {
1481 data = opx->offset;
1482 out(offset, segment, &data,
1483 size == 2 ? OUT_REL2ADR : OUT_REL4ADR,
1484 insn_end - offset, opx->segment, opx->wrt);
1485 } else {
1486 data = opx->offset - insn_end;
1487 out(offset, segment, &data,
1488 OUT_ADDRESS, size, NO_SEG, NO_SEG);
1490 offset += size;
1491 break;
1493 case4(070):
1494 if (opx->segment != segment) {
1495 data = opx->offset;
1496 out(offset, segment, &data,
1497 OUT_REL4ADR, insn_end - offset,
1498 opx->segment, opx->wrt);
1499 } else {
1500 data = opx->offset - insn_end;
1501 out(offset, segment, &data,
1502 OUT_ADDRESS, 4, NO_SEG, NO_SEG);
1504 offset += 4;
1505 break;
1507 case4(074):
1508 if (opx->segment == NO_SEG)
1509 errfunc(ERR_NONFATAL, "value referenced by FAR is not"
1510 " relocatable");
1511 data = 0;
1512 out(offset, segment, &data, OUT_ADDRESS, 2,
1513 outfmt->segbase(1 + opx->segment),
1514 opx->wrt);
1515 offset += 2;
1516 break;
1518 case 0172:
1519 c = *codes++;
1520 opx = &ins->oprs[c >> 3];
1521 bytes[0] = nasm_regvals[opx->basereg] << 4;
1522 opx = &ins->oprs[c & 7];
1523 if (opx->segment != NO_SEG || opx->wrt != NO_SEG) {
1524 errfunc(ERR_NONFATAL,
1525 "non-absolute expression not permitted as argument %d",
1526 c & 7);
1527 } else {
1528 if (opx->offset & ~15) {
1529 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1530 "four-bit argument exceeds bounds");
1532 bytes[0] |= opx->offset & 15;
1534 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1535 offset++;
1536 break;
1538 case 0173:
1539 c = *codes++;
1540 opx = &ins->oprs[c >> 4];
1541 bytes[0] = nasm_regvals[opx->basereg] << 4;
1542 bytes[0] |= c & 15;
1543 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1544 offset++;
1545 break;
1547 case4(0174):
1548 bytes[0] = nasm_regvals[opx->basereg] << 4;
1549 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1550 offset++;
1551 break;
1553 case4(0254):
1554 data = opx->offset;
1555 if (opx->wrt == NO_SEG && opx->segment == NO_SEG &&
1556 (int32_t)data != (int64_t)data) {
1557 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1558 "signed dword immediate exceeds bounds");
1560 out(offset, segment, &data, OUT_ADDRESS, 4,
1561 opx->segment, opx->wrt);
1562 offset += 4;
1563 break;
1565 case4(0240):
1566 case 0250:
1567 codes += 3;
1568 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1569 EVEX_P2Z | EVEX_P2AAA, 2);
1570 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1571 bytes[0] = 0x62;
1572 /* EVEX.X can be set by either REX or EVEX for different reasons */
1573 bytes[1] = ((((ins->rex & 7) << 5) |
1574 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
1575 (ins->vex_cm & 3);
1576 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1577 ((~ins->vexreg & 15) << 3) |
1578 (1 << 2) | (ins->vex_wlp & 3);
1579 bytes[3] = ins->evex_p[2];
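/*
 * At this point bytes[0..3] hold the 62h marker followed by the
 * EVEX P0, P1 and P2 payload bytes built above.
 */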
1580 out(offset, segment, &bytes, OUT_RAWDATA, 4, NO_SEG, NO_SEG);
1581 offset += 4;
1582 break;
1584 case4(0260):
1585 case 0270:
1586 codes += 2;
1587 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B))) {
1588 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1589 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1590 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
1591 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
1592 out(offset, segment, &bytes, OUT_RAWDATA, 3, NO_SEG, NO_SEG);
1593 offset += 3;
1594 } else {
1595 bytes[0] = 0xc5;
1596 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
1597 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
1598 out(offset, segment, &bytes, OUT_RAWDATA, 2, NO_SEG, NO_SEG);
1599 offset += 2;
1601 break;
1603 case 0271:
1604 case 0272:
1605 case 0273:
1606 break;
1608 case4(0274):
1610 uint64_t uv, um;
1611 int s;
1613 if (ins->rex & REX_W)
1614 s = 64;
1615 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1616 s = 16;
1617 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1618 s = 32;
1619 else
1620 s = bits;
1622 um = (uint64_t)2 << (s-1);
1623 uv = opx->offset;
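/*
 * Illustration (assuming s == 16): offsets 0..127, 0xFF80..0xFFFF
 * (-128..-1 as a word) and 64-bit sign-extended values down to -128
 * all pass the test below silently; other values draw the warning.
 */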
1625 if (uv > 127 && uv < (uint64_t)-128 &&
1626 (uv < um-128 || uv > um-1)) {
1627 /* If this wasn't explicitly byte-sized, warn as though we
1628 * had fallen through to the imm16/32/64 case.
1630 errfunc(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1631 "%s value exceeds bounds",
1632 (opx->type & BITS8) ? "signed byte" :
1633 s == 16 ? "word" :
1634 s == 32 ? "dword" :
1635 "signed dword");
1637 if (opx->segment != NO_SEG) {
1638 data = uv;
1639 out(offset, segment, &data, OUT_ADDRESS, 1,
1640 opx->segment, opx->wrt);
1641 } else {
1642 bytes[0] = uv;
1643 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG,
1644 NO_SEG);
1646 offset += 1;
1647 break;
1650 case4(0300):
1651 break;
1653 case 0310:
1654 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16)) {
1655 *bytes = 0x67;
1656 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1657 offset += 1;
1658 } else
1659 offset += 0;
1660 break;
1662 case 0311:
1663 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32)) {
1664 *bytes = 0x67;
1665 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1666 offset += 1;
1667 } else
1668 offset += 0;
1669 break;
1671 case 0312:
1672 break;
1674 case 0313:
1675 ins->rex = 0;
1676 break;
1678 case4(0314):
1679 break;
1681 case 0320:
1682 case 0321:
1683 break;
1685 case 0322:
1686 case 0323:
1687 break;
1689 case 0324:
1690 ins->rex |= REX_W;
1691 break;
1693 case 0325:
1694 break;
1696 case 0326:
1697 break;
1699 case 0330:
1700 *bytes = *codes++ ^ get_cond_opcode(ins->condition);
1701 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1702 offset += 1;
1703 break;
1705 case 0331:
1706 break;
1708 case 0332:
1709 case 0333:
1710 *bytes = c - 0332 + 0xF2;
1711 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1712 offset += 1;
1713 break;
1715 case 0334:
1716 if (ins->rex & REX_R) {
1717 *bytes = 0xF0;
1718 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1719 offset += 1;
1721 ins->rex &= ~(REX_L|REX_R);
1722 break;
1724 case 0335:
1725 break;
1727 case 0336:
1728 case 0337:
1729 break;
1731 case 0340:
1732 if (ins->oprs[0].segment != NO_SEG)
1733 errfunc(ERR_PANIC, "non-constant BSS size in pass two");
1734 else {
1735 int64_t size = ins->oprs[0].offset;
1736 if (size > 0)
1737 out(offset, segment, NULL,
1738 OUT_RESERVE, size, NO_SEG, NO_SEG);
1739 offset += size;
1741 break;
1743 case 0341:
1744 break;
1746 case 0360:
1747 break;
1749 case 0361:
1750 bytes[0] = 0x66;
1751 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1752 offset += 1;
1753 break;
1755 case 0364:
1756 case 0365:
1757 break;
1759 case 0366:
1760 case 0367:
1761 *bytes = c - 0366 + 0x66;
1762 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1763 offset += 1;
1764 break;
1766 case3(0370):
1767 break;
1769 case 0373:
1770 *bytes = bits == 16 ? 3 : 5;
1771 out(offset, segment, bytes, OUT_RAWDATA, 1, NO_SEG, NO_SEG);
1772 offset += 1;
1773 break;
1775 case 0374:
1776 eat = EA_XMMVSIB;
1777 break;
1779 case 0375:
1780 eat = EA_YMMVSIB;
1781 break;
1783 case 0376:
1784 eat = EA_ZMMVSIB;
1785 break;
1787 case4(0100):
1788 case4(0110):
1789 case4(0120):
1790 case4(0130):
1791 case4(0200):
1792 case4(0204):
1793 case4(0210):
1794 case4(0214):
1795 case4(0220):
1796 case4(0224):
1797 case4(0230):
1798 case4(0234):
1800 ea ea_data;
1801 int rfield;
1802 opflags_t rflags;
1803 uint8_t *p;
1804 int32_t s;
1805 struct operand *opy = &ins->oprs[op2];
1807 if (c <= 0177) {
1808 /* pick rfield from operand b (opx) */
1809 rflags = regflag(opx);
1810 rfield = nasm_regvals[opx->basereg];
1811 } else {
1812 /* rfield is constant */
1813 rflags = 0;
1814 rfield = c & 7;
1817 if (process_ea(opy, &ea_data, bits,
1818 rfield, rflags, ins) != eat)
1819 errfunc(ERR_NONFATAL, "invalid effective address");
1821 p = bytes;
1822 *p++ = ea_data.modrm;
1823 if (ea_data.sib_present)
1824 *p++ = ea_data.sib;
1826 s = p - bytes;
1827 out(offset, segment, bytes, OUT_RAWDATA, s, NO_SEG, NO_SEG);
1830 * Make sure the address gets the right offset in case
1831 * the line breaks in the .lst file (BR 1197827)
1833 offset += s;
1834 s = 0;
1836 switch (ea_data.bytes) {
1837 case 0:
1838 break;
1839 case 1:
1840 case 2:
1841 case 4:
1842 case 8:
1843 /* use compressed displacement, if available */
1844 data = ea_data.disp8 ? ea_data.disp8 : opy->offset;
1845 s += ea_data.bytes;
1846 if (ea_data.rip) {
1847 if (opy->segment == segment) {
1848 data -= insn_end;
1849 if (overflow_signed(data, ea_data.bytes))
1850 warn_overflow(ERR_PASS2, ea_data.bytes);
1851 out(offset, segment, &data, OUT_ADDRESS,
1852 ea_data.bytes, NO_SEG, NO_SEG);
1853 } else {
1854 /* overflow check in output/linker? */
1855 out(offset, segment, &data, OUT_REL4ADR,
1856 insn_end - offset, opy->segment, opy->wrt);
1858 } else {
1859 if (overflow_general(data, ins->addr_size >> 3) ||
1860 signed_bits(data, ins->addr_size) !=
1861 signed_bits(data, ea_data.bytes * 8))
1862 warn_overflow(ERR_PASS2, ea_data.bytes);
1864 out(offset, segment, &data, OUT_ADDRESS,
1865 ea_data.bytes, opy->segment, opy->wrt);
1867 break;
1868 default:
1869 /* Impossible! */
1870 errfunc(ERR_PANIC,
1871 "Invalid amount of bytes (%d) for offset?!",
1872 ea_data.bytes);
1873 break;
1875 offset += s;
1877 break;
1879 default:
1880 errfunc(ERR_PANIC, "internal instruction table corrupt"
1881 ": instruction code \\%o (0x%02X) given", c, c);
1882 break;
1887 static opflags_t regflag(const operand * o)
1889 if (!is_register(o->basereg))
1890 errfunc(ERR_PANIC, "invalid operand passed to regflag()");
1891 return nasm_reg_flags[o->basereg];
1894 static int32_t regval(const operand * o)
1896 if (!is_register(o->basereg))
1897 errfunc(ERR_PANIC, "invalid operand passed to regval()");
1898 return nasm_regvals[o->basereg];
1901 static int op_rexflags(const operand * o, int mask)
1903 opflags_t flags;
1904 int val;
1906 if (!is_register(o->basereg))
1907 errfunc(ERR_PANIC, "invalid operand passed to op_rexflags()");
1909 flags = nasm_reg_flags[o->basereg];
1910 val = nasm_regvals[o->basereg];
1912 return rexflags(val, flags, mask);
1915 static int rexflags(int val, opflags_t flags, int mask)
1917 int rex = 0;
1919 if (val >= 0 && (val & 8))
1920 rex |= REX_B|REX_X|REX_R;
1921 if (flags & BITS64)
1922 rex |= REX_W;
1923 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
1924 rex |= REX_H;
1925 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
1926 rex |= REX_P;
1928 return rex & mask;
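/*
 * Example (sketch): R9 (register value 9, bit 3 set) offers
 * REX_B|REX_X|REX_R, and the caller's mask picks whichever matches
 * the register's position (base, index or reg field); a 64-bit
 * register additionally contributes REX_W.
 */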
1931 static int evexflags(int val, decoflags_t deco,
1932 int mask, uint8_t byte)
1934 int evex = 0;
1936 switch (byte) {
1937 case 0:
1938 if (val >= 0 && (val & 16))
1939 evex |= (EVEX_P0RP | EVEX_P0X);
1940 break;
1941 case 2:
1942 if (val >= 0 && (val & 16))
1943 evex |= EVEX_P2VP;
1944 if (deco & Z)
1945 evex |= EVEX_P2Z;
1946 if (deco & OPMASK_MASK)
1947 evex |= deco & EVEX_P2AAA;
1948 break;
1950 return evex & mask;
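/*
 * Example (sketch): zmm16..zmm31 have register values with bit 4 set,
 * so byte 0 gains EVEX_P0RP|EVEX_P0X (the R'/X extensions) and byte 2
 * gains EVEX_P2VP when such a register is encoded in the V field.
 */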
1953 static int op_evexflags(const operand * o, int mask, uint8_t byte)
1955 int val;
1957 if (!is_register(o->basereg))
1958 errfunc(ERR_PANIC, "invalid operand passed to op_evexflags()");
1960 val = nasm_regvals[o->basereg];
1962 return evexflags(val, o->decoflags, mask, byte);
1965 static enum match_result find_match(const struct itemplate **tempp,
1966 insn *instruction,
1967 int32_t segment, int64_t offset, int bits)
1969 const struct itemplate *temp;
1970 enum match_result m, merr;
1971 opflags_t xsizeflags[MAX_OPERANDS];
1972 bool opsizemissing = false;
1973 int8_t broadcast = instruction->evex_brerop;
1974 int i;
1976 /* broadcasting uses a different data element size */
1977 for (i = 0; i < instruction->operands; i++)
1978 if (i == broadcast)
1979 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
1980 else
1981 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
1983 merr = MERR_INVALOP;
1985 for (temp = nasm_instructions[instruction->opcode];
1986 temp->opcode != I_none; temp++) {
1987 m = matches(temp, instruction, bits);
1988 if (m == MOK_JUMP) {
1989 if (jmp_match(segment, offset, bits, instruction, temp))
1990 m = MOK_GOOD;
1991 else
1992 m = MERR_INVALOP;
1993 } else if (m == MERR_OPSIZEMISSING &&
1994 (temp->flags & IF_SMASK) != IF_SX) {
1996 * Missing operand size and a candidate for fuzzy matching...
1998 for (i = 0; i < temp->operands; i++)
1999 if (i == broadcast)
2000 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
2001 else
2002 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
2003 opsizemissing = true;
2005 if (m > merr)
2006 merr = m;
2007 if (merr == MOK_GOOD)
2008 goto done;
2011 /* No match, but see if we can get a fuzzy operand size match... */
2012 if (!opsizemissing)
2013 goto done;
2015 for (i = 0; i < instruction->operands; i++) {
2017 * We ignore extrinsic operand sizes on registers, so we should
2018 * never try to fuzzy-match on them. This also resolves the case
2019 * when we have e.g. "xmmrm128" in two different positions.
2021 if (is_class(REGISTER, instruction->oprs[i].type))
2022 continue;
2024 /* This tests if xsizeflags[i] has more than one bit set */
2025 if ((xsizeflags[i] & (xsizeflags[i]-1)))
2026 goto done; /* No luck */
2028 if (i == broadcast) {
2029 instruction->oprs[i].decoflags |= xsizeflags[i];
2030 instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
2031 BITS32 : BITS64);
2032 } else {
2033 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
2037 /* Try matching again... */
2038 for (temp = nasm_instructions[instruction->opcode];
2039 temp->opcode != I_none; temp++) {
2040 m = matches(temp, instruction, bits);
2041 if (m == MOK_JUMP) {
2042 if (jmp_match(segment, offset, bits, instruction, temp))
2043 m = MOK_GOOD;
2044 else
2045 m = MERR_INVALOP;
2047 if (m > merr)
2048 merr = m;
2049 if (merr == MOK_GOOD)
2050 goto done;
2053 done:
2054 *tempp = temp;
2055 return merr;
2058 static enum match_result matches(const struct itemplate *itemp,
2059 insn *instruction, int bits)
2061 opflags_t size[MAX_OPERANDS], asize;
2062 bool opsizemissing = false;
2063 int i, oprs;
2066 * Check the opcode
2068 if (itemp->opcode != instruction->opcode)
2069 return MERR_INVALOP;
2072 * Count the operands
2074 if (itemp->operands != instruction->operands)
2075 return MERR_INVALOP;
2078 * Is it legal?
2080 if (!(optimizing > 0) && (itemp->flags & IF_OPT))
2081 return MERR_INVALOP;
2084 * Check that no spurious colons or TOs are present
2086 for (i = 0; i < itemp->operands; i++)
2087 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
2088 return MERR_INVALOP;
2091 * Process size flags
2093 switch (itemp->flags & IF_SMASK) {
2094 case IF_SB:
2095 asize = BITS8;
2096 break;
2097 case IF_SW:
2098 asize = BITS16;
2099 break;
2100 case IF_SD:
2101 asize = BITS32;
2102 break;
2103 case IF_SQ:
2104 asize = BITS64;
2105 break;
2106 case IF_SO:
2107 asize = BITS128;
2108 break;
2109 case IF_SY:
2110 asize = BITS256;
2111 break;
2112 case IF_SZ:
2113 asize = BITS512;
2114 break;
2115 case IF_SIZE:
2116 switch (bits) {
2117 case 16:
2118 asize = BITS16;
2119 break;
2120 case 32:
2121 asize = BITS32;
2122 break;
2123 case 64:
2124 asize = BITS64;
2125 break;
2126 default:
2127 asize = 0;
2128 break;
2130 break;
2131 default:
2132 asize = 0;
2133 break;
2136 if (itemp->flags & IF_ARMASK) {
2137 /* S- flags only apply to a specific operand */
2138 i = ((itemp->flags & IF_ARMASK) >> IF_ARSHFT) - 1;
2139 memset(size, 0, sizeof size);
2140 size[i] = asize;
2141 } else {
2142 /* S- flags apply to all operands */
2143 for (i = 0; i < MAX_OPERANDS; i++)
2144 size[i] = asize;
2148 * Check that the operand flags all match up.
2149 * It's a bit tricky, so let's be verbose:
2151 * 1) Find out the size of the operand. If the instruction
2152 * doesn't have one specified, we try to
2153 * guess it either from the template (IF_S* flags) or
2154 * from the code bits.
2156 * 2) If the template operand does not match the instruction, OR
2157 * the template has an operand size specified AND that size differs
2158 * from the one the instruction has (perhaps taken from the code bits),
2159 * then we:
2160 * a) check that only the sizes of the instruction and operand differ;
2161 * all other characteristics must match.
2162 * b) if the instruction specifies a register,
2163 * just mark that operand as "size
2164 * missing", which turns on the fuzzy operand-size
2165 * matching logic (handled by the caller).
2167 for (i = 0; i < itemp->operands; i++) {
2168 opflags_t type = instruction->oprs[i].type;
2169 decoflags_t deco = instruction->oprs[i].decoflags;
2170 bool is_broadcast = deco & BRDCAST_MASK;
2171 uint8_t brcast_num = 0;
2172 opflags_t template_opsize, insn_opsize;
2174 if (!(type & SIZE_MASK))
2175 type |= size[i];
2177 insn_opsize = type & SIZE_MASK;
2178 if (!is_broadcast) {
2179 template_opsize = itemp->opd[i] & SIZE_MASK;
2180 } else {
2181 decoflags_t deco_brsize = itemp->deco[i] & BRSIZE_MASK;
2183 * when broadcasting, the element size depends on
2184 * the instruction type; the decorator flag should match.
2187 if (deco_brsize) {
2188 template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
2189 /* calculate the proper number : {1to<brcast_num>} */
2190 brcast_num = (itemp->opd[i] & SIZE_MASK) / BITS128 *
2191 BITS64 / template_opsize * 2;
2192 } else {
2193 template_opsize = 0;
2197 if ((itemp->opd[i] & ~type & ~SIZE_MASK) ||
2198 (deco & ~itemp->deco[i] & ~BRNUM_MASK)) {
2199 return MERR_INVALOP;
2200 } else if (template_opsize) {
2201 if (template_opsize != insn_opsize) {
2202 if (insn_opsize) {
2203 return MERR_INVALOP;
2204 } else if (!is_class(REGISTER, type)) {
2206 * Note: we don't honor extrinsic operand sizes for registers,
2207 * so "missing operand size" for a register should be
2208 * considered a wildcard match rather than an error.
2210 opsizemissing = true;
2212 } else if (is_broadcast &&
2213 (brcast_num !=
2214 (8U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
2216 * The broadcast operand size matches, but the number of repeated
2217 * memory elements does not.
2218 * E.g. if a 64-bit double-precision float is broadcast to zmm (512b),
2219 * the broadcasting decorator must be {1to8}.
2221 return MERR_BRNUMMISMATCH;
2223 } else if (is_register(instruction->oprs[i].basereg) &&
2224 nasm_regvals[instruction->oprs[i].basereg] >= 16 &&
2225 !(itemp->flags & IF_AVX512)) {
2226 return MERR_ENCMISMATCH;
2227 } else if (instruction->prefixes[PPS_EVEX] &&
2228 !(itemp->flags & IF_AVX512)) {
2229 return MERR_ENCMISMATCH;
2233 if (opsizemissing)
2234 return MERR_OPSIZEMISSING;
2237 * Check operand sizes
2239 if (itemp->flags & (IF_SM | IF_SM2)) {
2240 oprs = (itemp->flags & IF_SM2 ? 2 : itemp->operands);
2241 for (i = 0; i < oprs; i++) {
2242 asize = itemp->opd[i] & SIZE_MASK;
2243 if (asize) {
2244 for (i = 0; i < oprs; i++)
2245 size[i] = asize;
2246 break;
2249 } else {
2250 oprs = itemp->operands;
2253 for (i = 0; i < itemp->operands; i++) {
2254 if (!(itemp->opd[i] & SIZE_MASK) &&
2255 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
2256 return MERR_OPSIZEMISMATCH;
2260 * Check template is okay at the set cpu level
2262 if (((itemp->flags & IF_PLEVEL) > cpu))
2263 return MERR_BADCPU;
2266 * Verify the appropriate long mode flag.
2268 if ((itemp->flags & (bits == 64 ? IF_NOLONG : IF_LONG)))
2269 return MERR_BADMODE;
2272 * If we have a HLE prefix, look for the NOHLE flag
2274 if ((itemp->flags & IF_NOHLE) &&
2275 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2276 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2277 return MERR_BADHLE;
2280 * Check if special handling needed for Jumps
2282 if ((itemp->code[0] & ~1) == 0370)
2283 return MOK_JUMP;
2286 * Check if BND prefix is allowed
2288 if ((IF_BND & ~itemp->flags) &&
2289 has_prefix(instruction, PPS_REP, P_BND))
2290 return MERR_BADBND;
2292 return MOK_GOOD;
2296 * Check whether the offset is a multiple of N for the corresponding
2297 * tuple type; if Disp8*N compression applies, the compressed displacement is stored in *compdisp.
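/*
 * Worked example: for a full-vector (FV) tuple with EVEX.W=0, no
 * broadcast and a 512-bit vector length, N = 64; an EA such as
 * [rax+256] is then emitted as the compressed displacement
 * 256/64 = 4, which fits in a single byte.
 */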
2299 static bool is_disp8n(operand *input, insn *ins, int8_t *compdisp)
2301 const uint8_t fv_n[2][2][VLMAX] = {{{16, 32, 64}, {4, 4, 4}},
2302 {{16, 32, 64}, {8, 8, 8}}};
2303 const uint8_t hv_n[2][VLMAX] = {{8, 16, 32}, {4, 4, 4}};
2304 const uint8_t dup_n[VLMAX] = {8, 32, 64};
2306 bool evex_b = input->decoflags & BRDCAST_MASK;
2307 enum ttypes tuple = ins->evex_tuple;
2308 /* vex_wlp composed as [wwllpp] */
2309 enum vectlens vectlen = (ins->vex_wlp & 0x0c) >> 2;
2310 /* wig(=2) is treated as w0(=0) */
2311 bool evex_w = (ins->vex_wlp & 0x10) >> 4;
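/*
 * Example: vex_wlp == 0x08 decodes as ww=00 (W0), ll=10 (512-bit VL),
 * pp=00 (no implied SIMD prefix).
 */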
2312 int32_t off = input->offset;
2313 uint8_t n = 0;
2314 int32_t disp8;
2316 switch(tuple) {
2317 case FV:
2318 n = fv_n[evex_w][evex_b][vectlen];
2319 break;
2320 case HV:
2321 n = hv_n[evex_b][vectlen];
2322 break;
2324 case FVM:
2325 /* 16, 32, 64 for VL 128, 256, 512 respectively */
2326 n = 1 << (vectlen + 4);
2327 break;
2328 case T1S8: /* N = 1 */
2329 case T1S16: /* N = 2 */
2330 n = tuple - T1S8 + 1;
2331 break;
2332 case T1S:
2333 /* N = 4 for 32bit, 8 for 64bit */
2334 n = evex_w ? 8 : 4;
2335 break;
2336 case T1F32:
2337 case T1F64:
2338 /* N = 4 for 32bit, 8 for 64bit */
2339 n = (tuple == T1F32 ? 4 : 8);
2340 break;
2341 case T2:
2342 case T4:
2343 case T8:
2344 if (vectlen + 7 <= (evex_w + 5) + (tuple - T2 + 1))
2345 n = 0;
2346 else
2347 n = 1 << (tuple - T2 + evex_w + 3);
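/*
 * E.g. Tuple4 (T4) with W0 would give N = 1 << (1+0+3) = 16, but at
 * 128-bit vector length the guard above yields N = 0, i.e. no
 * Disp8*N compression for that combination.
 */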
2348 break;
2349 case HVM:
2350 case QVM:
2351 case OVM:
2352 n = 1 << (OVM - tuple + vectlen + 1);
2353 break;
2354 case M128:
2355 n = 16;
2356 break;
2357 case DUP:
2358 n = dup_n[vectlen];
2359 break;
2361 default:
2362 break;
2365 if (n && !(off & (n - 1))) {
2366 disp8 = off / n;
2367 /* if it fits in Disp8 */
2368 if (disp8 >= -128 && disp8 <= 127) {
2369 *compdisp = disp8;
2370 return true;
2374 *compdisp = 0;
2375 return false;
2379 * Check if ModR/M.mod should/can be 01.
2380 * - EAF_BYTEOFFS is set
2381 * - offset can fit in a byte when EVEX is not used
2382 * - offset can be compressed when EVEX is used
2384 #define IS_MOD_01() (input->eaflags & EAF_BYTEOFFS || \
2385 (o >= -128 && o <= 127 && \
2386 seg == NO_SEG && !forw_ref && \
2387 !(input->eaflags & EAF_WORDOFFS) && \
2388 !(ins->rex & REX_EV)) || \
2389 (ins->rex & REX_EV && \
2390 is_disp8n(input, ins, &output->disp8)))
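/*
 * Example: with a legacy or VEX encoding, [rax+256] needs mod=10 and a
 * 4-byte displacement, but under EVEX with an FV tuple at 512-bit VL
 * the same offset compresses to disp8 == 4 (see is_disp8n above), so
 * mod=01 with a single displacement byte can be used.
 */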
2392 static enum ea_type process_ea(operand *input, ea *output, int bits,
2393 int rfield, opflags_t rflags, insn *ins)
2395 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
2396 int addrbits = ins->addr_size;
2398 output->type = EA_SCALAR;
2399 output->rip = false;
2400 output->disp8 = 0;
2402 /* REX flags for the rfield operand */
2403 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
2404 /* EVEX.R' flag for the REG operand */
2405 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
2407 if (is_class(REGISTER, input->type)) {
2409 * It's a direct register.
2411 if (!is_register(input->basereg))
2412 goto err;
2414 if (!is_reg_class(REG_EA, input->basereg))
2415 goto err;
2417 /* broadcasting is not available with a direct register operand. */
2418 if (input->decoflags & BRDCAST_MASK) {
2419 nasm_error(ERR_NONFATAL, "Broadcasting not allowed from a register");
2420 goto err;
2423 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
2424 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
2425 output->sib_present = false; /* no SIB necessary */
2426 output->bytes = 0; /* no offset necessary either */
2427 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
2428 } else {
2430 * It's a memory reference.
2433 /* Embedded rounding or SAE is not available with a mem ref operand. */
2434 if (input->decoflags & (ER | SAE)) {
2435 nasm_error(ERR_NONFATAL,
2436 "Embedded rounding is available only with reg-reg op.");
2437 return -1;
2440 if (input->basereg == -1 &&
2441 (input->indexreg == -1 || input->scale == 0)) {
2443 * It's a pure offset.
2445 if (bits == 64 && ((input->type & IP_REL) == IP_REL) &&
2446 input->segment == NO_SEG) {
2447 nasm_error(ERR_WARNING | ERR_PASS1, "absolute address can not be RIP-relative");
2448 input->type &= ~IP_REL;
2449 input->type |= MEMORY;
2452 if (input->eaflags & EAF_BYTEOFFS ||
2453 (input->eaflags & EAF_WORDOFFS &&
2454 input->disp_size != (addrbits != 16 ? 32 : 16))) {
2455 nasm_error(ERR_WARNING | ERR_PASS1, "displacement size ignored on absolute address");
2458 if (bits == 64 && (~input->type & IP_REL)) {
2459 output->sib_present = true;
2460 output->sib = GEN_SIB(0, 4, 5);
2461 output->bytes = 4;
2462 output->modrm = GEN_MODRM(0, rfield, 4);
2463 output->rip = false;
2464 } else {
2465 output->sib_present = false;
2466 output->bytes = (addrbits != 16 ? 4 : 2);
2467 output->modrm = GEN_MODRM(0, rfield, (addrbits != 16 ? 5 : 6));
2468 output->rip = bits == 64;
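/*
 * E.g. in 64-bit mode an absolute "mov eax,[abs 0x100]" is encoded
 * with ModRM mod=00 rm=100, SIB byte 0x25 (index=none, base=101) and
 * a 4-byte displacement, while a RIP-relative reference instead uses
 * mod=00 rm=101 followed by a 4-byte displacement.
 */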
2470 } else {
2472 * It's an indirection.
2474 int i = input->indexreg, b = input->basereg, s = input->scale;
2475 int32_t seg = input->segment;
2476 int hb = input->hintbase, ht = input->hinttype;
2477 int t, it, bt; /* register numbers */
2478 opflags_t x, ix, bx; /* register flags */
2480 if (s == 0)
2481 i = -1; /* make this easy, at least */
2483 if (is_register(i)) {
2484 it = nasm_regvals[i];
2485 ix = nasm_reg_flags[i];
2486 } else {
2487 it = -1;
2488 ix = 0;
2491 if (is_register(b)) {
2492 bt = nasm_regvals[b];
2493 bx = nasm_reg_flags[b];
2494 } else {
2495 bt = -1;
2496 bx = 0;
2499 /* if either one is a vector register... */
2500 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
2501 opflags_t sok = BITS32 | BITS64;
2502 int32_t o = input->offset;
2503 int mod, scale, index, base;
2506 * For a vector SIB, one register has to be a vector and the other,
2507 * if present, a GPR. The vector must be the index operand.
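/*
 * Typical case: "vgatherqpd zmm0{k1},[rax+zmm1*8]" -- rax is the GPR
 * base and zmm1 the vector index; if the vector was parsed into the
 * base slot it is swapped into the index slot below (scale permitting).
 */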
2509 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
2510 if (s == 0)
2511 s = 1;
2512 else if (s != 1)
2513 goto err;
2515 t = bt, bt = it, it = t;
2516 x = bx, bx = ix, ix = x;
2519 if (bt != -1) {
2520 if (REG_GPR & ~bx)
2521 goto err;
2522 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2523 sok &= bx;
2524 else
2525 goto err;
2529 * While we're here, ensure the user didn't specify
2530 * WORD or QWORD
2532 if (input->disp_size == 16 || input->disp_size == 64)
2533 goto err;
2535 if (addrbits == 16 ||
2536 (addrbits == 32 && !(sok & BITS32)) ||
2537 (addrbits == 64 && !(sok & BITS64)))
2538 goto err;
2540 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2541 : ((ix & YMMREG & ~REG_EA)
2542 ? EA_YMMVSIB : EA_XMMVSIB));
2544 output->rex |= rexflags(it, ix, REX_X);
2545 output->rex |= rexflags(bt, bx, REX_B);
2546 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
2548 index = it & 7; /* it is known to be != -1 */
2550 switch (s) {
2551 case 1:
2552 scale = 0;
2553 break;
2554 case 2:
2555 scale = 1;
2556 break;
2557 case 4:
2558 scale = 2;
2559 break;
2560 case 8:
2561 scale = 3;
2562 break;
2563 default: /* then what the smeg is it? */
2564 goto err; /* panic */
2567 if (bt == -1) {
2568 base = 5;
2569 mod = 0;
2570 } else {
2571 base = (bt & 7);
2572 if (base != REG_NUM_EBP && o == 0 &&
2573 seg == NO_SEG && !forw_ref &&
2574 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2575 mod = 0;
2576 else if (IS_MOD_01())
2577 mod = 1;
2578 else
2579 mod = 2;
2582 output->sib_present = true;
2583 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2584 output->modrm = GEN_MODRM(mod, rfield, 4);
2585 output->sib = GEN_SIB(scale, index, base);
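/*
 * For instance, base rsi with index zmm17 and scale 8 yields SIB
 * scale=3, index=zmm17&7=1, base=rsi; the high bits of the index
 * register number travel in REX.X / EVEX.V', set earlier via
 * rexflags() and evexflags().
 */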
2586 } else if ((ix|bx) & (BITS32|BITS64)) {
2588 * It must be a 32/64-bit memory reference. First we have
2589 * to check that all registers involved are of type E/Rxx.
2591 opflags_t sok = BITS32 | BITS64;
2592 int32_t o = input->offset;
2594 if (it != -1) {
2595 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2596 sok &= ix;
2597 else
2598 goto err;
2601 if (bt != -1) {
2602 if (REG_GPR & ~bx)
2603 goto err; /* Invalid register */
2604 if (~sok & bx & SIZE_MASK)
2605 goto err; /* Invalid size */
2606 sok &= bx;
2610 * While we're here, ensure the user didn't specify
2611 * WORD or QWORD
2613 if (input->disp_size == 16 || input->disp_size == 64)
2614 goto err;
2616 if (addrbits == 16 ||
2617 (addrbits == 32 && !(sok & BITS32)) ||
2618 (addrbits == 64 && !(sok & BITS64)))
2619 goto err;
2621 /* now reorganize base/index */
2622 if (s == 1 && bt != it && bt != -1 && it != -1 &&
2623 ((hb == b && ht == EAH_NOTBASE) ||
2624 (hb == i && ht == EAH_MAKEBASE))) {
2625 /* swap if hints say so */
2626 t = bt, bt = it, it = t;
2627 x = bx, bx = ix, ix = x;
2629 if (bt == it) /* convert EAX+2*EAX to 3*EAX */
2630 bt = -1, bx = 0, s++;
2631 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
2632 /* make single reg base, unless hint */
2633 bt = it, bx = ix, it = -1, ix = 0;
2635 if (((s == 2 && it != REG_NUM_ESP && !(input->eaflags & EAF_TIMESTWO)) ||
2636 s == 3 || s == 5 || s == 9) && bt == -1)
2637 bt = it, bx = ix, s--; /* convert 3*EAX to EAX+2*EAX */
2638 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
2639 (input->eaflags & EAF_TIMESTWO))
2640 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2641 /* convert [NOSPLIT EAX] to sib format with 0x0 displacement */
2642 if (s == 1 && it == REG_NUM_ESP) {
2643 /* swap ESP into base if scale is 1 */
2644 t = it, it = bt, bt = t;
2645 x = ix, ix = bx, bx = x;
2647 if (it == REG_NUM_ESP ||
2648 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
2649 goto err; /* wrong, for various reasons */
2651 output->rex |= rexflags(it, ix, REX_X);
2652 output->rex |= rexflags(bt, bx, REX_B);
2654 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
2655 /* no SIB needed */
2656 int mod, rm;
2658 if (bt == -1) {
2659 rm = 5;
2660 mod = 0;
2661 } else {
2662 rm = (bt & 7);
2663 if (rm != REG_NUM_EBP && o == 0 &&
2664 seg == NO_SEG && !forw_ref &&
2665 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2666 mod = 0;
2667 else if (IS_MOD_01())
2668 mod = 1;
2669 else
2670 mod = 2;
2673 output->sib_present = false;
2674 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2675 output->modrm = GEN_MODRM(mod, rfield, rm);
2676 } else {
2677 /* we need a SIB */
2678 int mod, scale, index, base;
2680 if (it == -1)
2681 index = 4, s = 1;
2682 else
2683 index = (it & 7);
2685 switch (s) {
2686 case 1:
2687 scale = 0;
2688 break;
2689 case 2:
2690 scale = 1;
2691 break;
2692 case 4:
2693 scale = 2;
2694 break;
2695 case 8:
2696 scale = 3;
2697 break;
2698 default: /* then what the smeg is it? */
2699 goto err; /* panic */
2702 if (bt == -1) {
2703 base = 5;
2704 mod = 0;
2705 } else {
2706 base = (bt & 7);
2707 if (base != REG_NUM_EBP && o == 0 &&
2708 seg == NO_SEG && !forw_ref &&
2709 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2710 mod = 0;
2711 else if (IS_MOD_01())
2712 mod = 1;
2713 else
2714 mod = 2;
2717 output->sib_present = true;
2718 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2719 output->modrm = GEN_MODRM(mod, rfield, 4);
2720 output->sib = GEN_SIB(scale, index, base);
2722 } else { /* it's 16-bit */
2723 int mod, rm;
2724 int16_t o = input->offset;
2726 /* check for 64-bit long mode */
2727 if (addrbits == 64)
2728 goto err;
2730 /* check all registers are BX, BP, SI or DI */
2731 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2732 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
2733 goto err;
2735 /* ensure the user didn't specify DWORD/QWORD */
2736 if (input->disp_size == 32 || input->disp_size == 64)
2737 goto err;
2739 if (s != 1 && i != -1)
2740 goto err; /* no can do, in 16-bit EA */
2741 if (b == -1 && i != -1) {
2742 int tmp = b;
2743 b = i;
2744 i = tmp;
2745 } /* swap */
2746 if ((b == R_SI || b == R_DI) && i != -1) {
2747 int tmp = b;
2748 b = i;
2749 i = tmp;
2751 /* have BX/BP as base, SI/DI index */
2752 if (b == i)
2753 goto err; /* shouldn't ever happen, in theory */
2754 if (i != -1 && b != -1 &&
2755 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
2756 goto err; /* invalid combinations */
2757 if (b == -1) /* pure offset: handled above */
2758 goto err; /* so if it gets to here, panic! */
2760 rm = -1;
2761 if (i != -1)
2762 switch (i * 256 + b) {
2763 case R_SI * 256 + R_BX:
2764 rm = 0;
2765 break;
2766 case R_DI * 256 + R_BX:
2767 rm = 1;
2768 break;
2769 case R_SI * 256 + R_BP:
2770 rm = 2;
2771 break;
2772 case R_DI * 256 + R_BP:
2773 rm = 3;
2774 break;
2775 } else
2776 switch (b) {
2777 case R_SI:
2778 rm = 4;
2779 break;
2780 case R_DI:
2781 rm = 5;
2782 break;
2783 case R_BP:
2784 rm = 6;
2785 break;
2786 case R_BX:
2787 rm = 7;
2788 break;
2790 if (rm == -1) /* can't happen, in theory */
2791 goto err; /* so panic if it does */
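/*
 * E.g. "[bx+si+0x10]" maps to rm=0 above; the 0x10 offset fits in a
 * byte, so mod=01 is chosen below and a single displacement byte is
 * emitted.
 */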
2793 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
2794 !(input->eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2795 mod = 0;
2796 else if (IS_MOD_01())
2797 mod = 1;
2798 else
2799 mod = 2;
2801 output->sib_present = false; /* no SIB - it's 16-bit */
2802 output->bytes = mod; /* bytes of offset needed */
2803 output->modrm = GEN_MODRM(mod, rfield, rm);
2808 output->size = 1 + output->sib_present + output->bytes;
2809 return output->type;
2811 err:
2812 return output->type = EA_INVALID;
2815 static void add_asp(insn *ins, int addrbits)
2817 int j, valid;
2818 int defdisp;
2820 valid = (addrbits == 64) ? 64|32 : 32|16;
2822 switch (ins->prefixes[PPS_ASIZE]) {
2823 case P_A16:
2824 valid &= 16;
2825 break;
2826 case P_A32:
2827 valid &= 32;
2828 break;
2829 case P_A64:
2830 valid &= 64;
2831 break;
2832 case P_ASP:
2833 valid &= (addrbits == 32) ? 16 : 32;
2834 break;
2835 default:
2836 break;
2839 for (j = 0; j < ins->operands; j++) {
2840 if (is_class(MEMORY, ins->oprs[j].type)) {
2841 opflags_t i, b;
2843 /* Verify the index as a register */
2844 if (!is_register(ins->oprs[j].indexreg))
2845 i = 0;
2846 else
2847 i = nasm_reg_flags[ins->oprs[j].indexreg];
2849 /* Verify the base as a register */
2850 if (!is_register(ins->oprs[j].basereg))
2851 b = 0;
2852 else
2853 b = nasm_reg_flags[ins->oprs[j].basereg];
2855 if (ins->oprs[j].scale == 0)
2856 i = 0;
2858 if (!i && !b) {
2859 int ds = ins->oprs[j].disp_size;
2860 if ((addrbits != 64 && ds > 8) ||
2861 (addrbits == 64 && ds == 16))
2862 valid &= ds;
2863 } else {
2864 if (!(REG16 & ~b))
2865 valid &= 16;
2866 if (!(REG32 & ~b))
2867 valid &= 32;
2868 if (!(REG64 & ~b))
2869 valid &= 64;
2871 if (!(REG16 & ~i))
2872 valid &= 16;
2873 if (!(REG32 & ~i))
2874 valid &= 32;
2875 if (!(REG64 & ~i))
2876 valid &= 64;
2881 if (valid & addrbits) {
2882 ins->addr_size = addrbits;
2883 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2884 /* Add an address size prefix */
2885 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
2886 ins->addr_size = (addrbits == 32) ? 16 : 32;
2887 } else {
2888 /* Impossible... */
2889 errfunc(ERR_NONFATAL, "impossible combination of address sizes");
2890 ins->addr_size = addrbits; /* Error recovery */
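/*
 * E.g. "mov ax,[bx+si]" assembled in BITS 32 leaves only 16 in
 * 'valid', so P_A16 (the 0x67 address-size prefix) is added and
 * addr_size becomes 16.
 */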
2893 defdisp = ins->addr_size == 16 ? 16 : 32;
2895 for (j = 0; j < ins->operands; j++) {
2896 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2897 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2899 * mem_offs sizes must match the address size; if not,
2900 * strip the MEM_OFFS bit and match only EA instructions
2902 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);