New backend interface - assembler internals #1
[nasm.git] / asm / assemble.c
blob a682e5a3ca13f1fa225af2d154f48a4bd2cda65d
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2016 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * Bytecode specification
38 * ----------------------
41 * Codes Mnemonic Explanation
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
98 * [p0] pp = 0 for no prefix
99 * [66] pp = 1 for legacy prefix 66
100 * [f3] pp = 2
101 * [f2] pp = 3
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
119 * [w0] ww = 0 for W = 0
120 * [w1] ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
148 * disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes a YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes a ZMM VSIB memory EA
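 *
 * Worked example (hypothetical code string, not taken from insns.dat,
 * shown only to illustrate how the codes above combine): the sequence
 * \321 \1 \x0F \110 \26 would be read as: \321 fixed 32-bit operand
 * size, so 0x66 is emitted only when assembling in 16-bit mode; \1 one
 * literal byte follows, here 0x0F; \110 a ModRM byte built from the EA
 * in operand 1 with the spare (reg) field taken from the register
 * value of operand 0; \26 a zero-extended byte immediate taken from
 * operand 2.
 */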
178 #include "compiler.h"
180 #include <stdio.h>
181 #include <string.h>
182 #include <stdlib.h>
184 #include "nasm.h"
185 #include "nasmlib.h"
186 #include "assemble.h"
187 #include "insns.h"
188 #include "tables.h"
189 #include "disp8.h"
190 #include "listing.h"
192 enum match_result {
194 * Matching errors. These should be sorted so that more specific
195 * errors come later in the sequence.
197 MERR_INVALOP,
198 MERR_OPSIZEMISSING,
199 MERR_OPSIZEMISMATCH,
200 MERR_BRNUMMISMATCH,
201 MERR_BADCPU,
202 MERR_BADMODE,
203 MERR_BADHLE,
204 MERR_ENCMISMATCH,
205 MERR_BADBND,
206 MERR_BADREPNE,
208 * Matching success; the conditional ones first
210 MOK_JUMP, /* Matching OK but needs jmp_match() */
211 MOK_GOOD /* Matching unconditionally OK */
214 typedef struct {
215 enum ea_type type; /* what kind of EA is this? */
216 int sib_present; /* is a SIB byte necessary? */
217 int bytes; /* # of bytes of offset needed */
218 int size; /* lazy - this is sib+bytes+1 */
219 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
220 int8_t disp8; /* compressed displacement for EVEX */
221 } ea;
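/*
 * Illustrative contents (assumed operand, default optimization): for a
 * scalar EA such as [ebx+4] with a spare/reg field of 0, process_ea()
 * below would fill in type = EA_SCALAR, sib_present = 0, bytes = 1
 * (one displacement byte), size = 2 (ModRM + disp8), modrm = 0x43
 * (mod=01, reg=000, rm=011) and rex = 0.
 */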
223 #define GEN_SIB(scale, index, base) \
224 (((scale) << 6) | ((index) << 3) | ((base)))
226 #define GEN_MODRM(mod, reg, rm) \
227 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
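/*
 * For example, GEN_MODRM(3, 2, 1) composes mod=11b, reg=010b, rm=001b
 * into 0xD1, and GEN_SIB(2, 4, 5) composes scale=2 (factor 4),
 * index=4, base=5 into 0xA5.
 */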
229 static iflag_t cpu; /* cpu level received from nasm.c */
231 static int64_t calcsize(int32_t, int64_t, int, insn *,
232 const struct itemplate *);
233 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
234 static void gencode(struct out_data *data, insn *ins);
235 static enum match_result find_match(const struct itemplate **tempp,
236 insn *instruction,
237 int32_t segment, int64_t offset, int bits);
238 static enum match_result matches(const struct itemplate *, insn *, int bits);
239 static opflags_t regflag(const operand *);
240 static int32_t regval(const operand *);
241 static int rexflags(int, opflags_t, int);
242 static int op_rexflags(const operand *, int);
243 static int op_evexflags(const operand *, int, uint8_t);
244 static void add_asp(insn *, int);
246 static enum ea_type process_ea(operand *, ea *, int, int, opflags_t, insn *);
248 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
250 return ins->prefixes[pos] == prefix;
253 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
255 if (ins->prefixes[pos])
256 nasm_error(ERR_NONFATAL, "invalid %s prefix",
257 prefix_name(ins->prefixes[pos]));
260 static const char *size_name(int size)
262 switch (size) {
263 case 1:
264 return "byte";
265 case 2:
266 return "word";
267 case 4:
268 return "dword";
269 case 8:
270 return "qword";
271 case 10:
272 return "tword";
273 case 16:
274 return "oword";
275 case 32:
276 return "yword";
277 case 64:
278 return "zword";
279 default:
280 return "???";
284 static void warn_overflow(int pass, int size)
286 nasm_error(ERR_WARNING | pass | ERR_WARN_NOV,
287 "%s data exceeds bounds", size_name(size));
290 static void warn_overflow_const(int64_t data, int size)
292 if (overflow_general(data, size))
293 warn_overflow(ERR_PASS1, size);
296 static void warn_overflow_opd(const struct operand *o, int size)
298 if (o->wrt == NO_SEG && o->segment == NO_SEG) {
299 if (overflow_general(o->offset, size))
300 warn_overflow(ERR_PASS2, size);
305 * This routine wraps the real output format's output routine,
306 * in order to pass a copy of the data off to the listing file
307 * generator at the same time, flatten unnecessary relocations,
308 * and verify backend compatibility.
310 static void out(struct out_data *data)
312 static int32_t lineno = 0; /* static!!! */
313 static const char *lnfname = NULL;
314 int asize;
315 const int amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
316 union {
317 uint8_t b[8];
318 uint64_t q;
319 } xdata;
320 uint64_t size = data->size;
322 if (!data->size)
323 return; /* Nothing to do */
325 switch (data->type) {
326 case OUT_ADDRESS:
327 asize = data->size;
328 nasm_assert(asize <= 8);
329 if (data->tsegment == NO_SEG && data->twrt == NO_SEG) {
330 /* Convert to RAWDATA */
331 /* XXX: check for overflow */
332 uint8_t *q = xdata.b;
334 WRITEADDR(q, data->toffset, asize);
335 data->data = xdata.b;
336 data->type = OUT_RAWDATA;
337 asize = 0; /* No longer an address */
339 break;
341 case OUT_RELADDR:
342 asize = data->size;
343 nasm_assert(asize <= 8);
344 if (data->tsegment == data->segment && data->twrt == NO_SEG) {
345 /* Convert to RAWDATA */
346 uint8_t *q = xdata.b;
347 int64_t delta = data->toffset - data->offset
348 - (data->inslen - data->insoffs);
350 if (overflow_signed(delta, asize))
351 warn_overflow(ERR_PASS2, asize);
353 WRITEADDR(q, delta, asize);
354 data->data = xdata.b;
355 data->type = OUT_RAWDATA;
356 asize = 0; /* No longer an address */
358 break;
360 default:
361 asize = 0; /* Not an address */
362 break;
365 lfmt->output(data);
368 * This call to src_get() determines when we call the
369 * debug-format-specific "linenum" function.
370 * It updates lineno and lnfname to the current values,
371 * returning 0 if "same as last time", -2 if lnfname
372 * changed, and otherwise the amount by which lineno changed.
373 * Because of this, these variables must be static.
376 if (src_get(&lineno, &lnfname))
377 dfmt->linenum(lnfname, lineno, data->segment);
379 if (asize && asize > amax) {
380 if (data->type != OUT_ADDRESS || data->sign == OUT_SIGNED) {
381 nasm_error(ERR_NONFATAL,
382 "%d-bit signed relocation unsupported by output format %s\n",
383 asize << 3, ofmt->shortname);
384 } else {
385 nasm_error(ERR_WARNING | ERR_WARN_ZEXTRELOC,
386 "%d-bit unsigned relocation zero-extended from %d bits\n",
387 asize << 3, ofmt->maxbits);
388 data->size = amax;
389 ofmt->output(data->segment, data->data, data->type,
390 data->size, data->tsegment, data->twrt);
391 data->insoffs += amax;
392 data->offset += amax;
393 data->size = size = asize - amax;
395 data->data = zero_buffer;
396 data->type = OUT_RAWDATA;
399 /* Hack until backend change */
400 switch (data->type) {
401 case OUT_RELADDR:
402 switch (data->size) {
403 case 1:
404 data->type = OUT_REL1ADR;
405 break;
406 case 2:
407 data->type = OUT_REL2ADR;
408 break;
409 case 4:
410 data->type = OUT_REL4ADR;
411 break;
412 case 8:
413 data->type = OUT_REL8ADR;
414 break;
415 default:
416 panic();
417 break;
420 xdata.q = data->toffset;
421 data->data = xdata.b;
422 data->size = data->inslen - data->insoffs;
423 break;
425 case OUT_SEGMENT:
426 data->type = OUT_ADDRESS;
427 /* fall through */
429 case OUT_ADDRESS:
430 xdata.q = data->toffset;
431 data->data = xdata.b;
432 data->size = (data->sign == OUT_SIGNED) ? -data->size : data->size;
433 break;
435 case OUT_RAWDATA:
436 case OUT_RESERVE:
437 data->tsegment = data->twrt = NO_SEG;
438 break;
440 default:
441 panic();
442 break;
445 ofmt->output(data->segment, data->data, data->type,
446 data->size, data->tsegment, data->twrt);
447 data->offset += size;
448 data->insoffs += size;
451 static inline void out_rawdata(struct out_data *data, const void *rawdata,
452 size_t size)
454 data->type = OUT_RAWDATA;
455 data->data = rawdata;
456 data->size = size;
457 out(data);
460 static void out_rawbyte(struct out_data *data, uint8_t byte)
462 data->type = OUT_RAWDATA;
463 data->data = &byte;
464 data->size = 1;
465 out(data);
468 static inline void out_reserve(struct out_data *data, uint64_t size)
470 data->type = OUT_RESERVE;
471 data->size = size;
472 out(data);
475 static inline void out_imm(struct out_data *data, struct operand *opx,
476 int size, enum out_sign sign)
478 data->type = OUT_ADDRESS;
479 data->sign = sign;
480 data->size = size;
481 data->toffset = opx->offset;
482 data->tsegment = opx->segment;
483 data->twrt = opx->wrt;
484 out(data);
487 static inline void out_reladdr(struct out_data *data, struct operand *opx,
488 int size)
490 data->type = OUT_RELADDR;
491 data->sign = OUT_SIGNED;
492 data->size = size;
493 data->toffset = opx->offset;
494 data->tsegment = opx->segment;
495 data->twrt = opx->wrt;
496 out(data);
499 static inline void out_segment(struct out_data *data, struct operand *opx)
501 data->type = OUT_SEGMENT;
502 data->sign = OUT_UNSIGNED;
503 data->size = 2;
504 data->toffset = opx->offset;
505 data->tsegment = ofmt->segbase(opx->segment + 1);
506 data->twrt = opx->wrt;
507 out(data);
510 static bool jmp_match(int32_t segment, int64_t offset, int bits,
511 insn * ins, const struct itemplate *temp)
513 int64_t isize;
514 const uint8_t *code = temp->code;
515 uint8_t c = code[0];
516 bool is_byte;
518 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
519 return false;
520 if (!optimizing)
521 return false;
522 if (optimizing < 0 && c == 0371)
523 return false;
525 isize = calcsize(segment, offset, bits, ins, temp);
527 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
528 /* Be optimistic in pass 1 */
529 return true;
531 if (ins->oprs[0].segment != segment)
532 return false;
534 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
535 is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */
537 if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
538 /* jmp short (opcode eb) cannot be used with bnd prefix. */
539 ins->prefixes[PPS_REP] = P_none;
540 nasm_error(ERR_WARNING | ERR_WARN_BND | ERR_PASS2 ,
541 "jmp short does not init bnd regs - bnd prefix dropped.");
544 return is_byte;
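/*
 * Illustrative numbers for the range check above (assumed values):
 * with offset = 0x1000, a byte-form instruction size of 2 and a target
 * at 0x1050 in the same segment, the delta is 0x1050 - 0x1000 - 2 =
 * 0x4E, which lies within [-128, 127], so the short form is accepted.
 */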
547 int64_t assemble(int32_t segment, int64_t start, int bits, iflag_t cp,
548 insn * instruction)
550 struct out_data data;
551 const struct itemplate *temp;
552 enum match_result m;
553 int32_t itimes;
554 int64_t wsize; /* size for DB etc. */
556 cpu = cp;
558 data.offset = start;
559 data.segment = segment;
560 data.itemp = NULL;
561 data.sign = OUT_WRAP;
562 data.bits = bits;
564 wsize = idata_bytes(instruction->opcode);
565 if (wsize == -1)
566 return 0;
568 if (wsize) {
569 extop *e;
570 int32_t t = instruction->times;
571 if (t < 0)
572 nasm_panic(0, "instruction->times < 0 (%"PRId32") in assemble()", t);
574 while (t--) { /* repeat TIMES times */
575 list_for_each(e, instruction->eops) {
576 if (e->type == EOT_DB_NUMBER) {
577 if (wsize > 8) {
578 nasm_error(ERR_NONFATAL,
579 "integer supplied to a DT, DO or DY"
580 " instruction");
581 } else {
582 data.insoffs = 0;
583 data.type = OUT_ADDRESS;
584 data.inslen = data.size = wsize;
585 data.toffset = e->offset;
586 data.tsegment = e->segment;
587 data.twrt = e->wrt;
588 out(&data);
590 } else if (e->type == EOT_DB_STRING ||
591 e->type == EOT_DB_STRING_FREE) {
592 int align = e->stringlen % wsize;
593 if (align)
594 align = wsize - align;
596 data.insoffs = 0;
597 data.inslen = e->stringlen + align;
599 out_rawdata(&data, e->stringval, e->stringlen);
600 out_rawdata(&data, zero_buffer, align);
603 if (t > 0 && t == instruction->times - 1) {
604 lfmt->set_offset(data.offset);
605 lfmt->uplevel(LIST_TIMES);
608 if (instruction->times > 1)
609 lfmt->downlevel(LIST_TIMES);
610 } else if (instruction->opcode == I_INCBIN) {
611 const char *fname = instruction->eops->stringval;
612 FILE *fp;
613 static char buf[BUFSIZ];
614 size_t t = instruction->times;
615 off_t base = 0;
616 off_t len;
618 fp = nasm_open_read(fname, NF_BINARY);
619 if (!fp) {
620 nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
621 fname);
622 goto done;
625 if (fseeko(fp, 0, SEEK_END) < 0) {
626 nasm_error(ERR_NONFATAL, "`incbin': unable to seek on file `%s'",
627 fname);
628 goto close_done;
631 len = ftello(fp);
632 if (instruction->eops->next) {
633 base = instruction->eops->next->offset;
634 if (base >= len) {
635 len = 0;
636 } else {
637 len -= base;
638 if (instruction->eops->next->next &&
639 len > (off_t)instruction->eops->next->next->offset)
640 len = (off_t)instruction->eops->next->next->offset;
643 lfmt->set_offset(data.offset);
644 lfmt->uplevel(LIST_INCBIN);
645 while (t--) {
646 off_t l;
648 data.insoffs = 0;
649 data.inslen = len;
651 if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
652 nasm_error(ERR_NONFATAL,
653 "`incbin': unable to seek on file `%s'",
654 fname);
655 goto end_incbin;
657 l = len;
658 while (l > 0) {
659 size_t m = l > (off_t)sizeof(buf) ? sizeof(buf) : (size_t)l; /* clamp chunk to buffer size */
660 m = fread(buf, 1, m, fp);
661 if (!m || feof(fp)) {
663 * This shouldn't happen unless the file
664 * actually changes while we are reading
665 * it.
667 nasm_error(ERR_NONFATAL,
668 "`incbin': unexpected EOF while"
669 " reading file `%s'", fname);
670 goto end_incbin;
672 out_rawdata(&data, buf, m);
673 l -= m;
676 end_incbin:
677 lfmt->downlevel(LIST_INCBIN);
678 if (instruction->times > 1) {
679 lfmt->set_offset(data.offset);
680 lfmt->uplevel(LIST_TIMES);
681 lfmt->downlevel(LIST_TIMES);
683 if (ferror(fp)) {
684 nasm_error(ERR_NONFATAL,
685 "`incbin': error while"
686 " reading file `%s'", fname);
688 close_done:
689 fclose(fp);
690 done:
692 } else {
693 /* "Real" instruction */
695 /* Check to see if we need an address-size prefix */
696 add_asp(instruction, bits);
698 m = find_match(&temp, instruction, data.segment, data.offset, bits);
700 if (m == MOK_GOOD) {
701 /* Matches! */
702 int64_t insn_size = calcsize(data.segment, data.offset,
703 bits, instruction, temp);
704 itimes = instruction->times;
705 if (insn_size < 0) /* shouldn't be, on pass two */
706 nasm_panic(0, "errors made it through from pass one");
708 data.itemp = temp;
709 data.bits = bits;
711 while (itimes--) {
712 data.insoffs = 0;
713 data.inslen = insn_size;
715 gencode(&data, instruction);
716 nasm_assert(data.insoffs == insn_size);
718 if (itimes > 0 && itimes == instruction->times - 1) {
719 lfmt->set_offset(data.offset);
720 lfmt->uplevel(LIST_TIMES);
723 if (instruction->times > 1)
724 lfmt->downlevel(LIST_TIMES);
725 } else {
726 /* No match */
727 switch (m) {
728 case MERR_OPSIZEMISSING:
729 nasm_error(ERR_NONFATAL, "operation size not specified");
730 break;
731 case MERR_OPSIZEMISMATCH:
732 nasm_error(ERR_NONFATAL, "mismatch in operand sizes");
733 break;
734 case MERR_BRNUMMISMATCH:
735 nasm_error(ERR_NONFATAL,
736 "mismatch in the number of broadcasting elements");
737 break;
738 case MERR_BADCPU:
739 nasm_error(ERR_NONFATAL, "no instruction for this cpu level");
740 break;
741 case MERR_BADMODE:
742 nasm_error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
743 bits);
744 break;
745 case MERR_ENCMISMATCH:
746 nasm_error(ERR_NONFATAL, "specific encoding scheme not available");
747 break;
748 case MERR_BADBND:
749 nasm_error(ERR_NONFATAL, "bnd prefix is not allowed");
750 break;
751 case MERR_BADREPNE:
752 nasm_error(ERR_NONFATAL, "%s prefix is not allowed",
753 (has_prefix(instruction, PPS_REP, P_REPNE) ?
754 "repne" : "repnz"));
755 break;
756 default:
757 nasm_error(ERR_NONFATAL,
758 "invalid combination of opcode and operands");
759 break;
763 return data.offset - start;
766 int64_t insn_size(int32_t segment, int64_t offset, int bits, iflag_t cp,
767 insn * instruction)
769 const struct itemplate *temp;
770 enum match_result m;
772 cpu = cp;
774 if (instruction->opcode == I_none)
775 return 0;
777 if (instruction->opcode == I_DB || instruction->opcode == I_DW ||
778 instruction->opcode == I_DD || instruction->opcode == I_DQ ||
779 instruction->opcode == I_DT || instruction->opcode == I_DO ||
780 instruction->opcode == I_DY) {
781 extop *e;
782 int32_t isize, osize, wsize;
784 isize = 0;
785 wsize = idata_bytes(instruction->opcode);
787 list_for_each(e, instruction->eops) {
788 int32_t align;
790 osize = 0;
791 if (e->type == EOT_DB_NUMBER) {
792 osize = 1;
793 warn_overflow_const(e->offset, wsize);
794 } else if (e->type == EOT_DB_STRING ||
795 e->type == EOT_DB_STRING_FREE)
796 osize = e->stringlen;
798 align = (-osize) % wsize;
799 if (align < 0)
800 align += wsize;
801 isize += osize + align;
803 return isize;
806 if (instruction->opcode == I_INCBIN) {
807 const char *fname = instruction->eops->stringval;
808 FILE *fp;
809 int64_t val = 0;
810 off_t len;
812 fp = nasm_open_read(fname, NF_BINARY);
813 if (!fp)
814 nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
815 fname);
816 else if (fseek(fp, 0L, SEEK_END) < 0)
817 nasm_error(ERR_NONFATAL, "`incbin': unable to seek on file `%s'",
818 fname);
819 else {
820 len = ftell(fp);
821 if (instruction->eops->next) {
822 if (len <= (off_t)instruction->eops->next->offset) {
823 len = 0;
824 } else {
825 len -= instruction->eops->next->offset;
826 if (instruction->eops->next->next &&
827 len > (off_t)instruction->eops->next->next->offset) {
828 len = (off_t)instruction->eops->next->next->offset;
832 val = len;
834 if (fp)
835 fclose(fp);
836 return val;
839 /* Check to see if we need an address-size prefix */
840 add_asp(instruction, bits);
842 m = find_match(&temp, instruction, segment, offset, bits);
843 if (m == MOK_GOOD) {
844 /* we've matched an instruction. */
845 return calcsize(segment, offset, bits, instruction, temp);
846 } else {
847 return -1; /* didn't match any instruction */
851 static void bad_hle_warn(const insn * ins, uint8_t hleok)
853 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
854 enum whatwarn { w_none, w_lock, w_inval } ww;
855 static const enum whatwarn warn[2][4] =
857 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
858 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
860 unsigned int n;
862 n = (unsigned int)rep_pfx - P_XACQUIRE;
863 if (n > 1)
864 return; /* Not XACQUIRE/XRELEASE */
866 ww = warn[n][hleok];
867 if (!is_class(MEMORY, ins->oprs[0].type))
868 ww = w_inval; /* HLE requires operand 0 to be memory */
870 switch (ww) {
871 case w_none:
872 break;
874 case w_lock:
875 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
876 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
877 "%s with this instruction requires lock",
878 prefix_name(rep_pfx));
880 break;
882 case w_inval:
883 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
884 "%s invalid with this instruction",
885 prefix_name(rep_pfx));
886 break;
890 /* Common construct */
891 #define case3(x) case (x): case (x)+1: case (x)+2
892 #define case4(x) case3(x): case (x)+3
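/*
 * For instance, case4(010) expands to "case 010: case 011: case 012:
 * case 013", covering the four byte codes that differ only in the
 * operand number held in their two low bits.
 */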
894 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
895 insn * ins, const struct itemplate *temp)
897 const uint8_t *codes = temp->code;
898 int64_t length = 0;
899 uint8_t c;
900 int rex_mask = ~0;
901 int op1, op2;
902 struct operand *opx;
903 uint8_t opex = 0;
904 enum ea_type eat;
905 uint8_t hleok = 0;
906 bool lockcheck = true;
907 enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
909 ins->rex = 0; /* Ensure REX is reset */
910 eat = EA_SCALAR; /* Expect a scalar EA */
911 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
913 if (ins->prefixes[PPS_OSIZE] == P_O64)
914 ins->rex |= REX_W;
916 (void)segment; /* Don't warn that this parameter is unused */
917 (void)offset; /* Don't warn that this parameter is unused */
919 while (*codes) {
920 c = *codes++;
921 op1 = (c & 3) + ((opex & 1) << 2);
922 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
923 opx = &ins->oprs[op1];
924 opex = 0; /* For the next iteration */
926 switch (c) {
927 case4(01):
928 codes += c, length += c;
929 break;
931 case3(05):
932 opex = c;
933 break;
935 case4(010):
936 ins->rex |=
937 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
938 codes++, length++;
939 break;
941 case4(014):
942 /* this is an index reg of MIB operand */
943 mib_index = opx->basereg;
944 break;
946 case4(020):
947 case4(024):
948 length++;
949 break;
951 case4(030):
952 length += 2;
953 break;
955 case4(034):
956 if (opx->type & (BITS16 | BITS32 | BITS64))
957 length += (opx->type & BITS16) ? 2 : 4;
958 else
959 length += (bits == 16) ? 2 : 4;
960 break;
962 case4(040):
963 length += 4;
964 break;
966 case4(044):
967 length += ins->addr_size >> 3;
968 break;
970 case4(050):
971 length++;
972 break;
974 case4(054):
975 length += 8; /* MOV reg64/imm */
976 break;
978 case4(060):
979 length += 2;
980 break;
982 case4(064):
983 if (opx->type & (BITS16 | BITS32 | BITS64))
984 length += (opx->type & BITS16) ? 2 : 4;
985 else
986 length += (bits == 16) ? 2 : 4;
987 break;
989 case4(070):
990 length += 4;
991 break;
993 case4(074):
994 length += 2;
995 break;
997 case 0172:
998 case 0173:
999 codes++;
1000 length++;
1001 break;
1003 case4(0174):
1004 length++;
1005 break;
1007 case4(0240):
1008 ins->rex |= REX_EV;
1009 ins->vexreg = regval(opx);
1010 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
1011 ins->vex_cm = *codes++;
1012 ins->vex_wlp = *codes++;
1013 ins->evex_tuple = (*codes++ - 0300);
1014 break;
1016 case 0250:
1017 ins->rex |= REX_EV;
1018 ins->vexreg = 0;
1019 ins->vex_cm = *codes++;
1020 ins->vex_wlp = *codes++;
1021 ins->evex_tuple = (*codes++ - 0300);
1022 break;
1024 case4(0254):
1025 length += 4;
1026 break;
1028 case4(0260):
1029 ins->rex |= REX_V;
1030 ins->vexreg = regval(opx);
1031 ins->vex_cm = *codes++;
1032 ins->vex_wlp = *codes++;
1033 break;
1035 case 0270:
1036 ins->rex |= REX_V;
1037 ins->vexreg = 0;
1038 ins->vex_cm = *codes++;
1039 ins->vex_wlp = *codes++;
1040 break;
1042 case3(0271):
1043 hleok = c & 3;
1044 break;
1046 case4(0274):
1047 length++;
1048 break;
1050 case4(0300):
1051 break;
1053 case 0310:
1054 if (bits == 64)
1055 return -1;
1056 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
1057 break;
1059 case 0311:
1060 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1061 break;
1063 case 0312:
1064 break;
1066 case 0313:
1067 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1068 has_prefix(ins, PPS_ASIZE, P_A32))
1069 return -1;
1070 break;
1072 case4(0314):
1073 break;
1075 case 0320:
1077 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1078 if (pfx == P_O16)
1079 break;
1080 if (pfx != P_none)
1081 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1082 else
1083 ins->prefixes[PPS_OSIZE] = P_O16;
1084 break;
1087 case 0321:
1089 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1090 if (pfx == P_O32)
1091 break;
1092 if (pfx != P_none)
1093 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1094 else
1095 ins->prefixes[PPS_OSIZE] = P_O32;
1096 break;
1099 case 0322:
1100 break;
1102 case 0323:
1103 rex_mask &= ~REX_W;
1104 break;
1106 case 0324:
1107 ins->rex |= REX_W;
1108 break;
1110 case 0325:
1111 ins->rex |= REX_NH;
1112 break;
1114 case 0326:
1115 break;
1117 case 0330:
1118 codes++, length++;
1119 break;
1121 case 0331:
1122 break;
1124 case 0332:
1125 case 0333:
1126 length++;
1127 break;
1129 case 0334:
1130 ins->rex |= REX_L;
1131 break;
1133 case 0335:
1134 break;
1136 case 0336:
1137 if (!ins->prefixes[PPS_REP])
1138 ins->prefixes[PPS_REP] = P_REP;
1139 break;
1141 case 0337:
1142 if (!ins->prefixes[PPS_REP])
1143 ins->prefixes[PPS_REP] = P_REPNE;
1144 break;
1146 case 0340:
1147 if (ins->oprs[0].segment != NO_SEG)
1148 nasm_error(ERR_NONFATAL, "attempt to reserve non-constant"
1149 " quantity of BSS space");
1150 else
1151 length += ins->oprs[0].offset;
1152 break;
1154 case 0341:
1155 if (!ins->prefixes[PPS_WAIT])
1156 ins->prefixes[PPS_WAIT] = P_WAIT;
1157 break;
1159 case 0360:
1160 break;
1162 case 0361:
1163 length++;
1164 break;
1166 case 0364:
1167 case 0365:
1168 break;
1170 case 0366:
1171 case 0367:
1172 length++;
1173 break;
1175 case 0370:
1176 case 0371:
1177 break;
1179 case 0373:
1180 length++;
1181 break;
1183 case 0374:
1184 eat = EA_XMMVSIB;
1185 break;
1187 case 0375:
1188 eat = EA_YMMVSIB;
1189 break;
1191 case 0376:
1192 eat = EA_ZMMVSIB;
1193 break;
1195 case4(0100):
1196 case4(0110):
1197 case4(0120):
1198 case4(0130):
1199 case4(0200):
1200 case4(0204):
1201 case4(0210):
1202 case4(0214):
1203 case4(0220):
1204 case4(0224):
1205 case4(0230):
1206 case4(0234):
1208 ea ea_data;
1209 int rfield;
1210 opflags_t rflags;
1211 struct operand *opy = &ins->oprs[op2];
1212 struct operand *op_er_sae;
1214 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1216 if (c <= 0177) {
1217 /* pick rfield from operand b (opx) */
1218 rflags = regflag(opx);
1219 rfield = nasm_regvals[opx->basereg];
1220 } else {
1221 rflags = 0;
1222 rfield = c & 7;
1225 /* EVEX.b1 : evex_brerop contains the operand position */
1226 op_er_sae = (ins->evex_brerop >= 0 ?
1227 &ins->oprs[ins->evex_brerop] : NULL);
1229 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1230 /* set EVEX.b */
1231 ins->evex_p[2] |= EVEX_P2B;
1232 if (op_er_sae->decoflags & ER) {
1233 /* set EVEX.RC (rounding control) */
1234 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1235 & EVEX_P2RC;
1237 } else {
1238 /* set EVEX.L'L (vector length) */
1239 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1240 ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
1241 if (opy->decoflags & BRDCAST_MASK) {
1242 /* set EVEX.b */
1243 ins->evex_p[2] |= EVEX_P2B;
1247 if (itemp_has(temp, IF_MIB)) {
1248 opy->eaflags |= EAF_MIB;
1250 * If a separate form of MIB (ICC style) is used,
1251 * the index register info is merged into the memory operand.
1253 if (mib_index != R_none) {
1254 opy->indexreg = mib_index;
1255 opy->scale = 1;
1256 opy->hintbase = mib_index;
1257 opy->hinttype = EAH_NOTBASE;
1261 if (process_ea(opy, &ea_data, bits,
1262 rfield, rflags, ins) != eat) {
1263 nasm_error(ERR_NONFATAL, "invalid effective address");
1264 return -1;
1265 } else {
1266 ins->rex |= ea_data.rex;
1267 length += ea_data.size;
1270 break;
1272 default:
1273 nasm_panic(0, "internal instruction table corrupt"
1274 ": instruction code \\%o (0x%02X) given", c, c);
1275 break;
1279 ins->rex &= rex_mask;
1281 if (ins->rex & REX_NH) {
1282 if (ins->rex & REX_H) {
1283 nasm_error(ERR_NONFATAL, "instruction cannot use high registers");
1284 return -1;
1286 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1289 switch (ins->prefixes[PPS_VEX]) {
1290 case P_EVEX:
1291 if (!(ins->rex & REX_EV))
1292 return -1;
1293 break;
1294 case P_VEX3:
1295 case P_VEX2:
1296 if (!(ins->rex & REX_V))
1297 return -1;
1298 break;
1299 default:
1300 break;
1303 if (ins->rex & (REX_V | REX_EV)) {
1304 int bad32 = REX_R|REX_W|REX_X|REX_B;
1306 if (ins->rex & REX_H) {
1307 nasm_error(ERR_NONFATAL, "cannot use high register in AVX instruction");
1308 return -1;
1310 switch (ins->vex_wlp & 060) {
1311 case 000:
1312 case 040:
1313 ins->rex &= ~REX_W;
1314 break;
1315 case 020:
1316 ins->rex |= REX_W;
1317 bad32 &= ~REX_W;
1318 break;
1319 case 060:
1320 /* Follow REX_W */
1321 break;
1324 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1325 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1326 return -1;
1327 } else if (!(ins->rex & REX_EV) &&
1328 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1329 nasm_error(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1330 return -1;
1332 if (ins->rex & REX_EV)
1333 length += 4;
1334 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1335 ins->prefixes[PPS_VEX] == P_VEX3)
1336 length += 3;
1337 else
1338 length += 2;
1339 } else if (ins->rex & REX_MASK) {
1340 if (ins->rex & REX_H) {
1341 nasm_error(ERR_NONFATAL, "cannot use high register in rex instruction");
1342 return -1;
1343 } else if (bits == 64) {
1344 length++;
1345 } else if ((ins->rex & REX_L) &&
1346 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1347 iflag_ffs(&cpu) >= IF_X86_64) {
1348 /* LOCK-as-REX.R */
1349 assert_no_prefix(ins, PPS_LOCK);
1350 lockcheck = false; /* Already errored, no need for warning */
1351 length++;
1352 } else {
1353 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1354 return -1;
1358 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1359 (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1360 nasm_error(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1361 "instruction is not lockable");
1364 bad_hle_warn(ins, hleok);
1367 * When the BND prefix is set by the DEFAULT directive,
1368 * it is added to every appropriate instruction line
1369 * unless it is overridden by a NOBND prefix.
1371 if (globalbnd &&
1372 (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
1373 ins->prefixes[PPS_REP] = P_BND;
1376 * Add length of legacy prefixes
1378 length += emit_prefix(NULL, bits, ins);
1380 return length;
1383 static inline void emit_rex(struct out_data *data, insn *ins)
1385 if (data->bits == 64) {
1386 if ((ins->rex & REX_MASK) &&
1387 !(ins->rex & (REX_V | REX_EV)) &&
1388 !ins->rex_done) {
1389 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1390 out_rawbyte(data, rex);
1391 ins->rex_done = true;
1396 static int emit_prefix(struct out_data *data, const int bits, insn *ins)
1398 int bytes = 0;
1399 int j;
1401 for (j = 0; j < MAXPREFIX; j++) {
1402 uint8_t c = 0;
1403 switch (ins->prefixes[j]) {
1404 case P_WAIT:
1405 c = 0x9B;
1406 break;
1407 case P_LOCK:
1408 c = 0xF0;
1409 break;
1410 case P_REPNE:
1411 case P_REPNZ:
1412 case P_XACQUIRE:
1413 case P_BND:
1414 c = 0xF2;
1415 break;
1416 case P_REPE:
1417 case P_REPZ:
1418 case P_REP:
1419 case P_XRELEASE:
1420 c = 0xF3;
1421 break;
1422 case R_CS:
1423 if (bits == 64) {
1424 nasm_error(ERR_WARNING | ERR_PASS2,
1425 "cs segment base generated, but will be ignored in 64-bit mode");
1427 c = 0x2E;
1428 break;
1429 case R_DS:
1430 if (bits == 64) {
1431 nasm_error(ERR_WARNING | ERR_PASS2,
1432 "ds segment base generated, but will be ignored in 64-bit mode");
1434 c = 0x3E;
1435 break;
1436 case R_ES:
1437 if (bits == 64) {
1438 nasm_error(ERR_WARNING | ERR_PASS2,
1439 "es segment base generated, but will be ignored in 64-bit mode");
1441 c = 0x26;
1442 break;
1443 case R_FS:
1444 c = 0x64;
1445 break;
1446 case R_GS:
1447 c = 0x65;
1448 break;
1449 case R_SS:
1450 if (bits == 64) {
1451 nasm_error(ERR_WARNING | ERR_PASS2,
1452 "ss segment base generated, but will be ignored in 64-bit mode");
1454 c = 0x36;
1455 break;
1456 case R_SEGR6:
1457 case R_SEGR7:
1458 nasm_error(ERR_NONFATAL,
1459 "segr6 and segr7 cannot be used as prefixes");
1460 break;
1461 case P_A16:
1462 if (bits == 64) {
1463 nasm_error(ERR_NONFATAL,
1464 "16-bit addressing is not supported "
1465 "in 64-bit mode");
1466 } else if (bits != 16)
1467 c = 0x67;
1468 break;
1469 case P_A32:
1470 if (bits != 32)
1471 c = 0x67;
1472 break;
1473 case P_A64:
1474 if (bits != 64) {
1475 nasm_error(ERR_NONFATAL,
1476 "64-bit addressing is only supported "
1477 "in 64-bit mode");
1479 break;
1480 case P_ASP:
1481 c = 0x67;
1482 break;
1483 case P_O16:
1484 if (bits != 16)
1485 c = 0x66;
1486 break;
1487 case P_O32:
1488 if (bits == 16)
1489 c = 0x66;
1490 break;
1491 case P_O64:
1492 /* REX.W */
1493 break;
1494 case P_OSP:
1495 c = 0x66;
1496 break;
1497 case P_EVEX:
1498 case P_VEX3:
1499 case P_VEX2:
1500 case P_NOBND:
1501 case P_none:
1502 break;
1503 default:
1504 nasm_panic(0, "invalid instruction prefix");
1506 if (c) {
1507 if (data)
1508 out_rawbyte(data, c);
1509 bytes++;
1512 return bytes;
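/*
 * Example (assumed prefixes, 16-bit mode): P_REP contributes 0xF3 and
 * P_A32 contributes 0x67, so emit_prefix() writes those two bytes and
 * returns 2.  When called with data == NULL, as calcsize() does, it
 * only counts the bytes without emitting them.
 */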
1515 static void gencode(struct out_data *data, insn *ins)
1517 uint8_t c;
1518 uint8_t bytes[4];
1519 int64_t size;
1520 int op1, op2;
1521 struct operand *opx;
1522 const uint8_t *codes = data->itemp->code;
1523 uint8_t opex = 0;
1524 enum ea_type eat = EA_SCALAR;
1525 const int bits = data->bits;
1527 ins->rex_done = false;
1529 emit_prefix(data, bits, ins);
1531 while (*codes) {
1532 c = *codes++;
1533 op1 = (c & 3) + ((opex & 1) << 2);
1534 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1535 opx = &ins->oprs[op1];
1536 opex = 0; /* For the next iteration */
1539 switch (c) {
1540 case 01:
1541 case 02:
1542 case 03:
1543 case 04:
1544 emit_rex(data, ins);
1545 out_rawdata(data, codes, c);
1546 codes += c;
1547 break;
1549 case 05:
1550 case 06:
1551 case 07:
1552 opex = c;
1553 break;
1555 case4(010):
1556 emit_rex(data, ins);
1557 out_rawbyte(data, *codes++ + (regval(opx) & 7));
1558 break;
1560 case4(014):
1561 break;
1563 case4(020):
1564 if (opx->offset < -256 || opx->offset > 255)
1565 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1566 "byte value exceeds bounds");
1567 out_imm(data, opx, 1, OUT_WRAP);
1568 break;
1570 case4(024):
1571 if (opx->offset < 0 || opx->offset > 255)
1572 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1573 "unsigned byte value exceeds bounds");
1574 out_imm(data, opx, 1, OUT_UNSIGNED);
1575 break;
1577 case4(030):
1578 warn_overflow_opd(opx, 2);
1579 out_imm(data, opx, 2, OUT_WRAP);
1580 break;
1582 case4(034):
1583 if (opx->type & (BITS16 | BITS32))
1584 size = (opx->type & BITS16) ? 2 : 4;
1585 else
1586 size = (bits == 16) ? 2 : 4;
1587 warn_overflow_opd(opx, size);
1588 out_imm(data, opx, size, OUT_WRAP);
1589 break;
1591 case4(040):
1592 warn_overflow_opd(opx, 4);
1593 out_imm(data, opx, 4, OUT_WRAP);
1594 break;
1596 case4(044):
1597 size = ins->addr_size >> 3;
1598 warn_overflow_opd(opx, size);
1599 out_imm(data, opx, size, OUT_WRAP);
1600 break;
1602 case4(050):
1603 if (opx->segment == data->segment) {
1604 int64_t delta = opx->offset - data->offset
1605 - (data->inslen - data->insoffs);
1606 if (delta > 127 || delta < -128)
1607 nasm_error(ERR_NONFATAL, "short jump is out of range");
1609 out_reladdr(data, opx, 1);
1610 break;
1612 case4(054):
1613 out_imm(data, opx, 8, OUT_WRAP);
1614 break;
1616 case4(060):
1617 out_reladdr(data, opx, 2);
1618 break;
1620 case4(064):
1621 if (opx->type & (BITS16 | BITS32 | BITS64))
1622 size = (opx->type & BITS16) ? 2 : 4;
1623 else
1624 size = (bits == 16) ? 2 : 4;
1626 out_reladdr(data, opx, size);
1627 break;
1629 case4(070):
1630 out_reladdr(data, opx, 4);
1631 break;
1633 case4(074):
1634 if (opx->segment == NO_SEG)
1635 nasm_error(ERR_NONFATAL, "value referenced by FAR is not"
1636 " relocatable");
1637 out_segment(data, opx);
1638 break;
1640 case 0172:
1641 c = *codes++;
1642 opx = &ins->oprs[c >> 3];
1643 bytes[0] = nasm_regvals[opx->basereg] << 4;
1644 opx = &ins->oprs[c & 7];
1645 if (opx->segment != NO_SEG || opx->wrt != NO_SEG) {
1646 nasm_error(ERR_NONFATAL,
1647 "non-absolute expression not permitted as argument %d",
1648 c & 7);
1649 } else {
1650 if (opx->offset & ~15) {
1651 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1652 "four-bit argument exceeds bounds");
1654 bytes[0] |= opx->offset & 15;
1656 out_rawdata(data, bytes, 1);
1657 break;
1659 case 0173:
1660 c = *codes++;
1661 opx = &ins->oprs[c >> 4];
1662 /* XXX: ZMM? */
1663 out_rawbyte(data, (nasm_regvals[opx->basereg] << 4) | (c & 15));
1664 break;
1666 case4(0174):
1667 out_rawbyte(data, (nasm_regvals[opx->basereg] << 4) |
1668 ((nasm_regvals[opx->basereg] & 16) >> 1));
1669 break;
1671 case4(0254):
1672 if (opx->wrt == NO_SEG && opx->segment == NO_SEG &&
1673 (int32_t)opx->offset != (int64_t)opx->offset) {
1674 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1675 "signed dword immediate exceeds bounds");
1677 out_imm(data, opx, 4, OUT_SIGNED);
1678 break;
1680 case4(0240):
1681 case 0250:
1682 codes += 3;
1683 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1684 EVEX_P2Z | EVEX_P2AAA, 2);
1685 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1686 bytes[0] = 0x62;
1687 /* EVEX.X can be set by either REX or EVEX for different reasons */
1688 bytes[1] = ((((ins->rex & 7) << 5) |
1689 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
1690 (ins->vex_cm & EVEX_P0MM);
1691 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1692 ((~ins->vexreg & 15) << 3) |
1693 (1 << 2) | (ins->vex_wlp & 3);
1694 bytes[3] = ins->evex_p[2];
1695 out_rawdata(data, bytes, 4);
1696 break;
1698 case4(0260):
1699 case 0270:
1700 codes += 2;
1701 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1702 ins->prefixes[PPS_VEX] == P_VEX3) {
1703 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1704 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1705 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
1706 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
1707 out_rawdata(data, bytes, 3);
1708 } else {
1709 bytes[0] = 0xc5;
1710 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
1711 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
1712 out_rawdata(data, bytes, 2);
1714 break;
1716 case 0271:
1717 case 0272:
1718 case 0273:
1719 break;
1721 case4(0274):
1723 uint64_t uv, um;
1724 int s;
1726 if (ins->rex & REX_W)
1727 s = 64;
1728 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1729 s = 16;
1730 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1731 s = 32;
1732 else
1733 s = bits;
1735 um = (uint64_t)2 << (s-1);
1736 uv = opx->offset;
1738 if (uv > 127 && uv < (uint64_t)-128 &&
1739 (uv < um-128 || uv > um-1)) {
1740 /* If this wasn't explicitly byte-sized, warn as though we
1741 * had fallen through to the imm16/32/64 case.
1743 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1744 "%s value exceeds bounds",
1745 (opx->type & BITS8) ? "signed byte" :
1746 s == 16 ? "word" :
1747 s == 32 ? "dword" :
1748 "signed dword");
1750 out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
1751 break;
1754 case4(0300):
1755 break;
1757 case 0310:
1758 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
1759 out_rawbyte(data, 0x67);
1760 break;
1762 case 0311:
1763 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
1764 out_rawbyte(data, 0x67);
1765 break;
1767 case 0312:
1768 break;
1770 case 0313:
1771 ins->rex = 0;
1772 break;
1774 case4(0314):
1775 break;
1777 case 0320:
1778 case 0321:
1779 break;
1781 case 0322:
1782 case 0323:
1783 break;
1785 case 0324:
1786 ins->rex |= REX_W;
1787 break;
1789 case 0325:
1790 break;
1792 case 0326:
1793 break;
1795 case 0330:
1796 out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
1797 break;
1799 case 0331:
1800 break;
1802 case 0332:
1803 case 0333:
1804 out_rawbyte(data, c - 0332 + 0xF2);
1805 break;
1807 case 0334:
1808 if (ins->rex & REX_R)
1809 out_rawbyte(data, 0xF0);
1810 ins->rex &= ~(REX_L|REX_R);
1811 break;
1813 case 0335:
1814 break;
1816 case 0336:
1817 case 0337:
1818 break;
1820 case 0340:
1821 if (ins->oprs[0].segment != NO_SEG)
1822 nasm_panic(0, "non-constant BSS size in pass two");
1824 out_reserve(data, ins->oprs[0].offset);
1825 break;
1827 case 0341:
1828 break;
1830 case 0360:
1831 break;
1833 case 0361:
1834 out_rawbyte(data, 0x66);
1835 break;
1837 case 0364:
1838 case 0365:
1839 break;
1841 case 0366:
1842 case 0367:
1843 out_rawbyte(data, c - 0366 + 0x66);
1844 break;
1846 case3(0370):
1847 break;
1849 case 0373:
1850 out_rawbyte(data, bits == 16 ? 3 : 5);
1851 break;
1853 case 0374:
1854 eat = EA_XMMVSIB;
1855 break;
1857 case 0375:
1858 eat = EA_YMMVSIB;
1859 break;
1861 case 0376:
1862 eat = EA_ZMMVSIB;
1863 break;
1865 case4(0100):
1866 case4(0110):
1867 case4(0120):
1868 case4(0130):
1869 case4(0200):
1870 case4(0204):
1871 case4(0210):
1872 case4(0214):
1873 case4(0220):
1874 case4(0224):
1875 case4(0230):
1876 case4(0234):
1878 ea ea_data;
1879 int rfield;
1880 opflags_t rflags;
1881 uint8_t *p;
1882 struct operand *opy = &ins->oprs[op2];
1884 if (c <= 0177) {
1885 /* pick rfield from operand b (opx) */
1886 rflags = regflag(opx);
1887 rfield = nasm_regvals[opx->basereg];
1888 } else {
1889 /* rfield is constant */
1890 rflags = 0;
1891 rfield = c & 7;
1894 if (process_ea(opy, &ea_data, bits,
1895 rfield, rflags, ins) != eat)
1896 nasm_error(ERR_NONFATAL, "invalid effective address");
1898 p = bytes;
1899 *p++ = ea_data.modrm;
1900 if (ea_data.sib_present)
1901 *p++ = ea_data.sib;
1902 out_rawdata(data, bytes, p - bytes);
1905 * Make sure the address gets the right offset in case
1906 * the line breaks in the .lst file (BR 1197827)
1909 if (ea_data.bytes) {
1910 /* use compressed displacement, if available */
1911 if (ea_data.disp8) {
1912 out_rawbyte(data, ea_data.disp8);
1913 } else if (ea_data.rip) {
1914 out_reladdr(data, opy, ea_data.bytes);
1915 } else {
1916 int asize = ins->addr_size >> 3;
1918 if (overflow_general(opy->offset, asize) ||
1919 signed_bits(opy->offset, ins->addr_size) !=
1920 signed_bits(opy->offset, ea_data.bytes << 3))
1921 warn_overflow(ERR_PASS2, ea_data.bytes);
1923 out_imm(data, opy, ea_data.bytes,
1924 (asize > ea_data.bytes) ? OUT_SIGNED : OUT_UNSIGNED);
1928 break;
1930 default:
1931 nasm_panic(0, "internal instruction table corrupt"
1932 ": instruction code \\%o (0x%02X) given", c, c);
1933 break;
1938 static opflags_t regflag(const operand * o)
1940 if (!is_register(o->basereg))
1941 nasm_panic(0, "invalid operand passed to regflag()");
1942 return nasm_reg_flags[o->basereg];
1945 static int32_t regval(const operand * o)
1947 if (!is_register(o->basereg))
1948 nasm_panic(0, "invalid operand passed to regval()");
1949 return nasm_regvals[o->basereg];
1952 static int op_rexflags(const operand * o, int mask)
1954 opflags_t flags;
1955 int val;
1957 if (!is_register(o->basereg))
1958 nasm_panic(0, "invalid operand passed to op_rexflags()");
1960 flags = nasm_reg_flags[o->basereg];
1961 val = nasm_regvals[o->basereg];
1963 return rexflags(val, flags, mask);
1966 static int rexflags(int val, opflags_t flags, int mask)
1968 int rex = 0;
1970 if (val >= 0 && (val & 8))
1971 rex |= REX_B|REX_X|REX_R;
1972 if (flags & BITS64)
1973 rex |= REX_W;
1974 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
1975 rex |= REX_H;
1976 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
1977 rex |= REX_P;
1979 return rex & mask;
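/*
 * Example (assuming a 64-bit GPR whose encoded register value is 9,
 * e.g. R9): val & 8 and BITS64 are both set, so rex becomes
 * REX_B|REX_X|REX_R|REX_W before masking; with a mask of REX_B|REX_W
 * the function returns REX_B|REX_W.
 */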
1982 static int evexflags(int val, decoflags_t deco,
1983 int mask, uint8_t byte)
1985 int evex = 0;
1987 switch (byte) {
1988 case 0:
1989 if (val >= 0 && (val & 16))
1990 evex |= (EVEX_P0RP | EVEX_P0X);
1991 break;
1992 case 2:
1993 if (val >= 0 && (val & 16))
1994 evex |= EVEX_P2VP;
1995 if (deco & Z)
1996 evex |= EVEX_P2Z;
1997 if (deco & OPMASK_MASK)
1998 evex |= deco & EVEX_P2AAA;
1999 break;
2001 return evex & mask;
2004 static int op_evexflags(const operand * o, int mask, uint8_t byte)
2006 int val;
2008 val = nasm_regvals[o->basereg];
2010 return evexflags(val, o->decoflags, mask, byte);
2013 static enum match_result find_match(const struct itemplate **tempp,
2014 insn *instruction,
2015 int32_t segment, int64_t offset, int bits)
2017 const struct itemplate *temp;
2018 enum match_result m, merr;
2019 opflags_t xsizeflags[MAX_OPERANDS];
2020 bool opsizemissing = false;
2021 int8_t broadcast = instruction->evex_brerop;
2022 int i;
2024 /* broadcasting uses a different data element size */
2025 for (i = 0; i < instruction->operands; i++)
2026 if (i == broadcast)
2027 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
2028 else
2029 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
2031 merr = MERR_INVALOP;
2033 for (temp = nasm_instructions[instruction->opcode];
2034 temp->opcode != I_none; temp++) {
2035 m = matches(temp, instruction, bits);
2036 if (m == MOK_JUMP) {
2037 if (jmp_match(segment, offset, bits, instruction, temp))
2038 m = MOK_GOOD;
2039 else
2040 m = MERR_INVALOP;
2041 } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
2043 * Missing operand size and a candidate for fuzzy matching...
2045 for (i = 0; i < temp->operands; i++)
2046 if (i == broadcast)
2047 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
2048 else
2049 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
2050 opsizemissing = true;
2052 if (m > merr)
2053 merr = m;
2054 if (merr == MOK_GOOD)
2055 goto done;
2058 /* No match, but see if we can get a fuzzy operand size match... */
2059 if (!opsizemissing)
2060 goto done;
2062 for (i = 0; i < instruction->operands; i++) {
2064 * We ignore extrinsic operand sizes on registers, so we should
2065 * never try to fuzzy-match on them. This also resolves the case
2066 * when we have e.g. "xmmrm128" in two different positions.
2068 if (is_class(REGISTER, instruction->oprs[i].type))
2069 continue;
2071 /* This tests if xsizeflags[i] has more than one bit set */
2072 if ((xsizeflags[i] & (xsizeflags[i]-1)))
2073 goto done; /* No luck */
2075 if (i == broadcast) {
2076 instruction->oprs[i].decoflags |= xsizeflags[i];
2077 instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
2078 BITS32 : BITS64);
2079 } else {
2080 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
2084 /* Try matching again... */
2085 for (temp = nasm_instructions[instruction->opcode];
2086 temp->opcode != I_none; temp++) {
2087 m = matches(temp, instruction, bits);
2088 if (m == MOK_JUMP) {
2089 if (jmp_match(segment, offset, bits, instruction, temp))
2090 m = MOK_GOOD;
2091 else
2092 m = MERR_INVALOP;
2094 if (m > merr)
2095 merr = m;
2096 if (merr == MOK_GOOD)
2097 goto done;
2100 done:
2101 *tempp = temp;
2102 return merr;
2105 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2107 opflags_t opsize = opflags & SIZE_MASK;
2108 uint8_t brcast_num;
2111 * Due to discontinuity between BITS64 and BITS128 (BITS80),
2112 * this cannot be a simple arithmetic calculation.
2114 if (brsize > BITS64)
2115 nasm_error(ERR_FATAL,
2116 "size of broadcasting element is greater than 64 bits");
2118 switch (opsize) {
2119 case BITS64:
2120 brcast_num = BITS64 / brsize;
2121 break;
2122 default:
2123 brcast_num = (opsize / BITS128) * (BITS64 / brsize) * 2;
2124 break;
2127 return brcast_num;
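/*
 * Example, given the SIZE_MASK bit ordering: broadcasting 64-bit
 * elements into a 256-bit (ymm) operand gives
 * (BITS256/BITS128) * (BITS64/BITS64) * 2 = 4, i.e. {1to4}; 32-bit
 * elements into a 512-bit (zmm) operand give
 * (BITS512/BITS128) * (BITS64/BITS32) * 2 = 16, i.e. {1to16}.
 */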
2130 static enum match_result matches(const struct itemplate *itemp,
2131 insn *instruction, int bits)
2133 opflags_t size[MAX_OPERANDS], asize;
2134 bool opsizemissing = false;
2135 int i, oprs;
2138 * Check the opcode
2140 if (itemp->opcode != instruction->opcode)
2141 return MERR_INVALOP;
2144 * Count the operands
2146 if (itemp->operands != instruction->operands)
2147 return MERR_INVALOP;
2150 * Is it legal?
2152 if (!(optimizing > 0) && itemp_has(itemp, IF_OPT))
2153 return MERR_INVALOP;
2156 * {evex} available?
2158 switch (instruction->prefixes[PPS_VEX]) {
2159 case P_EVEX:
2160 if (!itemp_has(itemp, IF_EVEX))
2161 return MERR_ENCMISMATCH;
2162 break;
2163 case P_VEX3:
2164 case P_VEX2:
2165 if (!itemp_has(itemp, IF_VEX))
2166 return MERR_ENCMISMATCH;
2167 break;
2168 default:
2169 break;
2173 * Check that no spurious colons or TOs are present
2175 for (i = 0; i < itemp->operands; i++)
2176 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
2177 return MERR_INVALOP;
2180 * Process size flags
2182 switch (itemp_smask(itemp)) {
2183 case IF_GENBIT(IF_SB):
2184 asize = BITS8;
2185 break;
2186 case IF_GENBIT(IF_SW):
2187 asize = BITS16;
2188 break;
2189 case IF_GENBIT(IF_SD):
2190 asize = BITS32;
2191 break;
2192 case IF_GENBIT(IF_SQ):
2193 asize = BITS64;
2194 break;
2195 case IF_GENBIT(IF_SO):
2196 asize = BITS128;
2197 break;
2198 case IF_GENBIT(IF_SY):
2199 asize = BITS256;
2200 break;
2201 case IF_GENBIT(IF_SZ):
2202 asize = BITS512;
2203 break;
2204 case IF_GENBIT(IF_SIZE):
2205 switch (bits) {
2206 case 16:
2207 asize = BITS16;
2208 break;
2209 case 32:
2210 asize = BITS32;
2211 break;
2212 case 64:
2213 asize = BITS64;
2214 break;
2215 default:
2216 asize = 0;
2217 break;
2219 break;
2220 default:
2221 asize = 0;
2222 break;
2225 if (itemp_armask(itemp)) {
2226 /* S- flags only apply to a specific operand */
2227 i = itemp_arg(itemp);
2228 memset(size, 0, sizeof size);
2229 size[i] = asize;
2230 } else {
2231 /* S- flags apply to all operands */
2232 for (i = 0; i < MAX_OPERANDS; i++)
2233 size[i] = asize;
2237 * Check that the operand flags all match up;
2238 * it's a bit tricky, so let's be verbose:
2240 * 1) Find out the size of the operand. If the instruction
2241 * doesn't have one specified, we try to
2242 * guess it either from the template (IF_S* flag) or
2243 * from the code bits.
2245 * 2) If the template operand does not match the instruction, OR
2246 * the template has an operand size specified AND that size differs
2247 * from the one the instruction has (perhaps we got it from code bits),
2248 * then we:
2249 * a) check that only the size of the instruction and operand differs,
2250 * and that all other characteristics match;
2251 * b) if it is a register specified in the instruction,
2252 * just mark that operand as "size
2253 * missing", which turns on the fuzzy operand-size
2254 * logic facility (handled by the caller).
2256 for (i = 0; i < itemp->operands; i++) {
2257 opflags_t type = instruction->oprs[i].type;
2258 decoflags_t deco = instruction->oprs[i].decoflags;
2259 bool is_broadcast = deco & BRDCAST_MASK;
2260 uint8_t brcast_num = 0;
2261 opflags_t template_opsize, insn_opsize;
2263 if (!(type & SIZE_MASK))
2264 type |= size[i];
2266 insn_opsize = type & SIZE_MASK;
2267 if (!is_broadcast) {
2268 template_opsize = itemp->opd[i] & SIZE_MASK;
2269 } else {
2270 decoflags_t deco_brsize = itemp->deco[i] & BRSIZE_MASK;
2272 * When broadcasting, the element size depends on the
2273 * instruction type; the template's decorator flag must match.
2276 if (deco_brsize) {
2277 template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
2278 /* calculate the proper number : {1to<brcast_num>} */
2279 brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
2280 } else {
2281 template_opsize = 0;
2285 if ((itemp->opd[i] & ~type & ~SIZE_MASK) ||
2286 (deco & ~itemp->deco[i] & ~BRNUM_MASK)) {
2287 return MERR_INVALOP;
2288 } else if (template_opsize) {
2289 if (template_opsize != insn_opsize) {
2290 if (insn_opsize) {
2291 return MERR_INVALOP;
2292 } else if (!is_class(REGISTER, type)) {
2294 * Note: we don't honor extrinsic operand sizes for registers,
2295 * so "missing operand size" for a register should be
2296 * considered a wildcard match rather than an error.
2298 opsizemissing = true;
2300 } else if (is_broadcast &&
2301 (brcast_num !=
2302 (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
2304 * The broadcasting operand size matches, but the number of
2305 * repeated memory elements does not.
2306 * E.g. if a 64-bit double-precision float is broadcast to a
2307 * ymm register (256 bits), the broadcasting decorator must be {1to4}.
2309 return MERR_BRNUMMISMATCH;
2314 if (opsizemissing)
2315 return MERR_OPSIZEMISSING;
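/*
 * For instance, "add [foo], 1" gives no size for either operand, so
 * every sized template reports MERR_OPSIZEMISSING here; the caller's
 * fuzzy size logic then decides whether a size can be inferred or an
 * explicit specifier (BYTE, WORD, ...) is required.
 */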
2318 * Check operand sizes
2320 if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
2321 oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
2322 for (i = 0; i < oprs; i++) {
2323 asize = itemp->opd[i] & SIZE_MASK;
2324 if (asize) {
2325 for (i = 0; i < oprs; i++)
2326 size[i] = asize;
2327 break;
2330 } else {
2331 oprs = itemp->operands;
2334 for (i = 0; i < itemp->operands; i++) {
2335 if (!(itemp->opd[i] & SIZE_MASK) &&
2336 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
2337 return MERR_OPSIZEMISMATCH;
2341 * Check that the template is okay at the selected CPU level
2343 if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
2344 return MERR_BADCPU;
2347 * Verify the appropriate long mode flag.
2349 if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
2350 return MERR_BADMODE;
2353 * If we have a HLE prefix, look for the NOHLE flag
2355 if (itemp_has(itemp, IF_NOHLE) &&
2356 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2357 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2358 return MERR_BADHLE;
2361 * Check if special handling is needed for jumps
2363 if ((itemp->code[0] & ~1) == 0370)
2364 return MOK_JUMP;
2367 * Check whether the BND prefix is allowed here; on BND
2368 * instructions, the other 0xF2 prefixes (REPNE/REPNZ) are prohibited.
2370 if (!itemp_has(itemp, IF_BND) &&
2371 (has_prefix(instruction, PPS_REP, P_BND) ||
2372 has_prefix(instruction, PPS_REP, P_NOBND)))
2373 return MERR_BADBND;
2374 else if (itemp_has(itemp, IF_BND) &&
2375 (has_prefix(instruction, PPS_REP, P_REPNE) ||
2376 has_prefix(instruction, PPS_REP, P_REPNZ)))
2377 return MERR_BADREPNE;
2379 return MOK_GOOD;
2380 }
2383 * Check if ModR/M.mod should/can be 01.
2384 * - EAF_BYTEOFFS is set
2385 * - offset can fit in a byte when EVEX is not used
2386 * - offset can be compressed when EVEX is used
2388 #define IS_MOD_01() (input->eaflags & EAF_BYTEOFFS || \
2389 (o >= -128 && o <= 127 && \
2390 seg == NO_SEG && !forw_ref && \
2391 !(input->eaflags & EAF_WORDOFFS) && \
2392 !(ins->rex & REX_EV)) || \
2393 (ins->rex & REX_EV && \
2394 is_disp8n(input, ins, &output->disp8)))
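/*
 * Example: in [rbx+8] the offset fits in a signed byte, so mod=01
 * with a one-byte displacement is chosen.  Under EVEX, is_disp8n()
 * also allows compressed displacements: for a full 64-byte (zmm)
 * memory operand, [rax+256] is stored as disp8 = 4 (256/64),
 * again with mod=01.
 */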
2396 static enum ea_type process_ea(operand *input, ea *output, int bits,
2397 int rfield, opflags_t rflags, insn *ins)
2399 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
2400 int addrbits = ins->addr_size;
2401 int eaflags = input->eaflags;
2403 output->type = EA_SCALAR;
2404 output->rip = false;
2405 output->disp8 = 0;
2407 /* REX flags for the rfield operand */
2408 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
2409 /* EVEX.R' flag for the REG operand */
2410 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
2412 if (is_class(REGISTER, input->type)) {
2414 * It's a direct register.
2416 if (!is_register(input->basereg))
2417 goto err;
2419 if (!is_reg_class(REG_EA, input->basereg))
2420 goto err;
2422 /* broadcasting is not available with a direct register operand. */
2423 if (input->decoflags & BRDCAST_MASK) {
2424 nasm_error(ERR_NONFATAL, "Broadcasting not allowed from a register");
2425 goto err;
2428 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
2429 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
2430 output->sib_present = false; /* no SIB necessary */
2431 output->bytes = 0; /* no offset necessary either */
2432 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
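/*
 * Example: with rfield = 2 and EDX (register value 2) as the
 * direct-register operand, this produces the ModR/M byte
 * 11 010 010 = 0xD2 (mod=3, reg=rfield, rm=register value).
 */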
2433 } else {
2435 * It's a memory reference.
2438 /* Embedded rounding or SAE is not available with a mem ref operand. */
2439 if (input->decoflags & (ER | SAE)) {
2440 nasm_error(ERR_NONFATAL,
2441 "Embedded rounding is available only with reg-reg op.");
2442 goto err;
2445 if (input->basereg == -1 &&
2446 (input->indexreg == -1 || input->scale == 0)) {
2448 * It's a pure offset.
2450 if (bits == 64 && ((input->type & IP_REL) == IP_REL) &&
2451 input->segment == NO_SEG) {
2452 nasm_error(ERR_WARNING | ERR_PASS1, "absolute address can not be RIP-relative");
2453 input->type &= ~IP_REL;
2454 input->type |= MEMORY;
2457 if (bits == 64 &&
2458 !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
2459 nasm_error(ERR_NONFATAL, "RIP-relative addressing is prohibited for mib.");
2460 goto err;
2463 if (eaflags & EAF_BYTEOFFS ||
2464 (eaflags & EAF_WORDOFFS &&
2465 input->disp_size != (addrbits != 16 ? 32 : 16))) {
2466 nasm_error(ERR_WARNING | ERR_PASS1, "displacement size ignored on absolute address");
2469 if (bits == 64 && (~input->type & IP_REL)) {
2470 output->sib_present = true;
2471 output->sib = GEN_SIB(0, 4, 5);
2472 output->bytes = 4;
2473 output->modrm = GEN_MODRM(0, rfield, 4);
2474 output->rip = false;
2475 } else {
2476 output->sib_present = false;
2477 output->bytes = (addrbits != 16 ? 4 : 2);
2478 output->modrm = GEN_MODRM(0, rfield, (addrbits != 16 ? 5 : 6));
2479 output->rip = bits == 64;
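/*
 * Example: a pure offset such as [0x1000] in 64-bit mode without
 * RIP-relative addressing is encoded as mod=00, rm=100 plus the SIB
 * byte 0x25 (index=100 i.e. none, base=101) and a 4-byte
 * displacement; the RIP-relative form instead uses mod=00, rm=101
 * with a 4-byte displacement, and 16-bit addressing uses mod=00,
 * rm=110 with a bare disp16.
 */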
2481 } else {
2483 * It's an indirection.
2485 int i = input->indexreg, b = input->basereg, s = input->scale;
2486 int32_t seg = input->segment;
2487 int hb = input->hintbase, ht = input->hinttype;
2488 int t, it, bt; /* register numbers */
2489 opflags_t x, ix, bx; /* register flags */
2491 if (s == 0)
2492 i = -1; /* make this easy, at least */
2494 if (is_register(i)) {
2495 it = nasm_regvals[i];
2496 ix = nasm_reg_flags[i];
2497 } else {
2498 it = -1;
2499 ix = 0;
2502 if (is_register(b)) {
2503 bt = nasm_regvals[b];
2504 bx = nasm_reg_flags[b];
2505 } else {
2506 bt = -1;
2507 bx = 0;
2510 /* if either one is a vector register... */
2511 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
2512 opflags_t sok = BITS32 | BITS64;
2513 int32_t o = input->offset;
2514 int mod, scale, index, base;
2517 * For a vector SIB, one has to be a vector and the other,
2518 * if present, a GPR. The vector must be the index operand.
2520 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
2521 if (s == 0)
2522 s = 1;
2523 else if (s != 1)
2524 goto err;
2526 t = bt, bt = it, it = t;
2527 x = bx, bx = ix, ix = x;
2530 if (bt != -1) {
2531 if (REG_GPR & ~bx)
2532 goto err;
2533 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2534 sok &= bx;
2535 else
2536 goto err;
2540 * While we're here, ensure the user didn't specify
2541 * WORD or QWORD
2543 if (input->disp_size == 16 || input->disp_size == 64)
2544 goto err;
2546 if (addrbits == 16 ||
2547 (addrbits == 32 && !(sok & BITS32)) ||
2548 (addrbits == 64 && !(sok & BITS64)))
2549 goto err;
2551 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2552 : ((ix & YMMREG & ~REG_EA)
2553 ? EA_YMMVSIB : EA_XMMVSIB));
2555 output->rex |= rexflags(it, ix, REX_X);
2556 output->rex |= rexflags(bt, bx, REX_B);
2557 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
2559 index = it & 7; /* it is known to be != -1 */
2561 switch (s) {
2562 case 1:
2563 scale = 0;
2564 break;
2565 case 2:
2566 scale = 1;
2567 break;
2568 case 4:
2569 scale = 2;
2570 break;
2571 case 8:
2572 scale = 3;
2573 break;
2574 default: /* then what the smeg is it? */
2575 goto err; /* panic */
2578 if (bt == -1) {
2579 base = 5;
2580 mod = 0;
2581 } else {
2582 base = (bt & 7);
2583 if (base != REG_NUM_EBP && o == 0 &&
2584 seg == NO_SEG && !forw_ref &&
2585 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2586 mod = 0;
2587 else if (IS_MOD_01())
2588 mod = 1;
2589 else
2590 mod = 2;
2593 output->sib_present = true;
2594 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2595 output->modrm = GEN_MODRM(mod, rfield, 4);
2596 output->sib = GEN_SIB(scale, index, base);
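/*
 * Example: [rax+xmm2*8] yields EA_XMMVSIB with scale=3 (for *8),
 * index=2 and base=0, i.e. SIB = 11 010 000 = 0xD0, and mod=00
 * since RAX is not RBP and there is no displacement.
 */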
2597 } else if ((ix|bx) & (BITS32|BITS64)) {
2599 * it must be a 32/64-bit memory reference. Firstly we have
2600 * to check that all registers involved are type E/Rxx.
2602 opflags_t sok = BITS32 | BITS64;
2603 int32_t o = input->offset;
2605 if (it != -1) {
2606 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2607 sok &= ix;
2608 else
2609 goto err;
2612 if (bt != -1) {
2613 if (REG_GPR & ~bx)
2614 goto err; /* Invalid register */
2615 if (~sok & bx & SIZE_MASK)
2616 goto err; /* Invalid size */
2617 sok &= bx;
2621 * While we're here, ensure the user didn't specify
2622 * WORD or QWORD
2624 if (input->disp_size == 16 || input->disp_size == 64)
2625 goto err;
2627 if (addrbits == 16 ||
2628 (addrbits == 32 && !(sok & BITS32)) ||
2629 (addrbits == 64 && !(sok & BITS64)))
2630 goto err;
2632 /* now reorganize base/index */
2633 if (s == 1 && bt != it && bt != -1 && it != -1 &&
2634 ((hb == b && ht == EAH_NOTBASE) ||
2635 (hb == i && ht == EAH_MAKEBASE))) {
2636 /* swap if hints say so */
2637 t = bt, bt = it, it = t;
2638 x = bx, bx = ix, ix = x;
2641 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
2642 /* make single reg base, unless hint */
2643 bt = it, bx = ix, it = -1, ix = 0;
2645 if (eaflags & EAF_MIB) {
2646 /* only for mib operands */
2647 if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
2649 * make a single reg index [reg*1].
2650 * gas uses this form for an explicit index register.
2652 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2654 if ((ht == EAH_SUMMED) && bt == -1) {
2655 /* split a previously summed index back into [base, index] */
2656 bt = it, bx = ix, s--;
2658 } else {
2659 if (((s == 2 && it != REG_NUM_ESP &&
2660 (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
2661 s == 3 || s == 5 || s == 9) && bt == -1) {
2662 /* convert 3*EAX to EAX+2*EAX */
2663 bt = it, bx = ix, s--;
2665 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
2666 (eaflags & EAF_TIMESTWO) &&
2667 (hb == b && ht == EAH_NOTBASE)) {
2669 * convert [NOSPLIT EAX*1]
2670 * to sib format with 0x0 displacement - [EAX*1+0].
2672 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2675 if (s == 1 && it == REG_NUM_ESP) {
2676 /* swap ESP into base if scale is 1 */
2677 t = it, it = bt, bt = t;
2678 x = ix, ix = bx, bx = x;
2680 if (it == REG_NUM_ESP ||
2681 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
2682 goto err; /* wrong, for various reasons */
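/*
 * Example of the scale fix-ups above: [ecx*3], which has no base,
 * is rewritten as [ecx+ecx*2]; similarly [eax*5] becomes [eax+eax*4].
 */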
2684 output->rex |= rexflags(it, ix, REX_X);
2685 output->rex |= rexflags(bt, bx, REX_B);
2687 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
2688 /* no SIB needed */
2689 int mod, rm;
2691 if (bt == -1) {
2692 rm = 5;
2693 mod = 0;
2694 } else {
2695 rm = (bt & 7);
2696 if (rm != REG_NUM_EBP && o == 0 &&
2697 seg == NO_SEG && !forw_ref &&
2698 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2699 mod = 0;
2700 else if (IS_MOD_01())
2701 mod = 1;
2702 else
2703 mod = 2;
2706 output->sib_present = false;
2707 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2708 output->modrm = GEN_MODRM(mod, rfield, rm);
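/*
 * Example: [ebx+8] needs no SIB byte; base EBX gives rm=011 and the
 * offset 8 fits in a byte, so mod=01 and the ModR/M byte is
 * 01 rfield 011 followed by a one-byte displacement.
 */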
2709 } else {
2710 /* we need a SIB */
2711 int mod, scale, index, base;
2713 if (it == -1)
2714 index = 4, s = 1;
2715 else
2716 index = (it & 7);
2718 switch (s) {
2719 case 1:
2720 scale = 0;
2721 break;
2722 case 2:
2723 scale = 1;
2724 break;
2725 case 4:
2726 scale = 2;
2727 break;
2728 case 8:
2729 scale = 3;
2730 break;
2731 default: /* then what the smeg is it? */
2732 goto err; /* panic */
2735 if (bt == -1) {
2736 base = 5;
2737 mod = 0;
2738 } else {
2739 base = (bt & 7);
2740 if (base != REG_NUM_EBP && o == 0 &&
2741 seg == NO_SEG && !forw_ref &&
2742 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2743 mod = 0;
2744 else if (IS_MOD_01())
2745 mod = 1;
2746 else
2747 mod = 2;
2750 output->sib_present = true;
2751 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2752 output->modrm = GEN_MODRM(mod, rfield, 4);
2753 output->sib = GEN_SIB(scale, index, base);
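/*
 * Example: [eax+ecx*4+128] requires a SIB byte; the displacement 128
 * does not fit in a signed byte, so mod=10 (4-byte displacement),
 * ModR/M rm=100, and SIB = 10 001 000 = 0x88 (scale=2 for *4,
 * index=ECX=1, base=EAX=0).
 */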
2755 } else { /* it's 16-bit */
2756 int mod, rm;
2757 int16_t o = input->offset;
2759 /* check for 64-bit long mode */
2760 if (addrbits == 64)
2761 goto err;
2763 /* check all registers are BX, BP, SI or DI */
2764 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2765 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
2766 goto err;
2768 /* ensure the user didn't specify DWORD/QWORD */
2769 if (input->disp_size == 32 || input->disp_size == 64)
2770 goto err;
2772 if (s != 1 && i != -1)
2773 goto err; /* no can do, in 16-bit EA */
2774 if (b == -1 && i != -1) {
2775 int tmp = b;
2776 b = i;
2777 i = tmp;
2778 } /* swap */
2779 if ((b == R_SI || b == R_DI) && i != -1) {
2780 int tmp = b;
2781 b = i;
2782 i = tmp;
2784 /* have BX/BP as base, SI/DI index */
2785 if (b == i)
2786 goto err; /* shouldn't ever happen, in theory */
2787 if (i != -1 && b != -1 &&
2788 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
2789 goto err; /* invalid combinations */
2790 if (b == -1) /* pure offset: handled above */
2791 goto err; /* so if it gets to here, panic! */
2793 rm = -1;
2794 if (i != -1)
2795 switch (i * 256 + b) {
2796 case R_SI * 256 + R_BX:
2797 rm = 0;
2798 break;
2799 case R_DI * 256 + R_BX:
2800 rm = 1;
2801 break;
2802 case R_SI * 256 + R_BP:
2803 rm = 2;
2804 break;
2805 case R_DI * 256 + R_BP:
2806 rm = 3;
2807 break;
2808 } else
2809 switch (b) {
2810 case R_SI:
2811 rm = 4;
2812 break;
2813 case R_DI:
2814 rm = 5;
2815 break;
2816 case R_BP:
2817 rm = 6;
2818 break;
2819 case R_BX:
2820 rm = 7;
2821 break;
2823 if (rm == -1) /* can't happen, in theory */
2824 goto err; /* so panic if it does */
2826 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
2827 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2828 mod = 0;
2829 else if (IS_MOD_01())
2830 mod = 1;
2831 else
2832 mod = 2;
2834 output->sib_present = false; /* no SIB - it's 16-bit */
2835 output->bytes = mod; /* bytes of offset needed */
2836 output->modrm = GEN_MODRM(mod, rfield, rm);
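/*
 * Example: [bx+si+4] maps to rm=000 with mod=01 and a one-byte
 * displacement, while [bp] alone is forced to mod=01 with a zero
 * displacement byte, because rm=110 with mod=00 means a bare disp16.
 */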
2841 output->size = 1 + output->sib_present + output->bytes;
2842 return output->type;
2844 err:
2845 return output->type = EA_INVALID;
2846 }
2848 static void add_asp(insn *ins, int addrbits)
2850 int j, valid;
2851 int defdisp;
2853 valid = (addrbits == 64) ? 64|32 : 32|16;
2855 switch (ins->prefixes[PPS_ASIZE]) {
2856 case P_A16:
2857 valid &= 16;
2858 break;
2859 case P_A32:
2860 valid &= 32;
2861 break;
2862 case P_A64:
2863 valid &= 64;
2864 break;
2865 case P_ASP:
2866 valid &= (addrbits == 32) ? 16 : 32;
2867 break;
2868 default:
2869 break;
2872 for (j = 0; j < ins->operands; j++) {
2873 if (is_class(MEMORY, ins->oprs[j].type)) {
2874 opflags_t i, b;
2876 /* Index register flags, if the index is a register */
2877 if (!is_register(ins->oprs[j].indexreg))
2878 i = 0;
2879 else
2880 i = nasm_reg_flags[ins->oprs[j].indexreg];
2882 /* Base register flags, if the base is a register */
2883 if (!is_register(ins->oprs[j].basereg))
2884 b = 0;
2885 else
2886 b = nasm_reg_flags[ins->oprs[j].basereg];
2888 if (ins->oprs[j].scale == 0)
2889 i = 0;
2891 if (!i && !b) {
2892 int ds = ins->oprs[j].disp_size;
2893 if ((addrbits != 64 && ds > 8) ||
2894 (addrbits == 64 && ds == 16))
2895 valid &= ds;
2896 } else {
2897 if (!(REG16 & ~b))
2898 valid &= 16;
2899 if (!(REG32 & ~b))
2900 valid &= 32;
2901 if (!(REG64 & ~b))
2902 valid &= 64;
2904 if (!(REG16 & ~i))
2905 valid &= 16;
2906 if (!(REG32 & ~i))
2907 valid &= 32;
2908 if (!(REG64 & ~i))
2909 valid &= 64;
2914 if (valid & addrbits) {
2915 ins->addr_size = addrbits;
2916 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2917 /* Add an address size prefix */
2918 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
2919 ins->addr_size = (addrbits == 32) ? 16 : 32;
2920 } else {
2921 /* Impossible... */
2922 nasm_error(ERR_NONFATAL, "impossible combination of address sizes");
2923 ins->addr_size = addrbits; /* Error recovery */
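/*
 * Example: in 64-bit mode, "mov eax, [ebx]" leaves only 32 in
 * "valid", so a 0x67 address-size prefix (P_A32) is added and
 * addr_size becomes 32; "mov eax, [bx]" would leave valid == 16,
 * which is impossible in long mode and raises the error above.
 */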
2926 defdisp = ins->addr_size == 16 ? 16 : 32;
2928 for (j = 0; j < ins->operands; j++) {
2929 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2930 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2932 * mem_offs sizes must match the address size; if not,
2933 * strip the MEM_OFFS bit and match only EA instructions
2935 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);