nasmlib/file: move memory-mapping functions out of file.c
[nasm.git] / asm / assemble.c
blob6aee17105059e4ceb66970eca8c8d93434a23afb
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2017 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * Bytecode specification
38 * ----------------------
41 * Codes Mnemonic Explanation
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
98 * [p0] pp = 0 for no prefix
 * [66] pp = 1 for legacy prefix 66
100 * [f3] pp = 2
101 * [f2] pp = 3
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
119 * [w0] ww = 0 for W = 0
 * [w1] ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
 * disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes an YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes an ZMM VSIB memory EA
178 #include "compiler.h"
180 #include <stdio.h>
181 #include <string.h>
182 #include <stdlib.h>
184 #include "nasm.h"
185 #include "nasmlib.h"
186 #include "assemble.h"
187 #include "insns.h"
188 #include "tables.h"
189 #include "disp8.h"
190 #include "listing.h"
192 enum match_result {
194 * Matching errors. These should be sorted so that more specific
195 * errors come later in the sequence.
197 MERR_INVALOP,
198 MERR_OPSIZEMISSING,
199 MERR_OPSIZEMISMATCH,
200 MERR_BRNUMMISMATCH,
201 MERR_BADCPU,
202 MERR_BADMODE,
203 MERR_BADHLE,
204 MERR_ENCMISMATCH,
205 MERR_BADBND,
206 MERR_BADREPNE,
208 * Matching success; the conditional ones first
210 MOK_JUMP, /* Matching OK but needs jmp_match() */
211 MOK_GOOD /* Matching unconditionally OK */
214 typedef struct {
215 enum ea_type type; /* what kind of EA is this? */
216 int sib_present; /* is a SIB byte necessary? */
217 int bytes; /* # of bytes of offset needed */
218 int size; /* lazy - this is sib+bytes+1 */
219 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
220 int8_t disp8; /* compressed displacement for EVEX */
221 } ea;
223 #define GEN_SIB(scale, index, base) \
224 (((scale) << 6) | ((index) << 3) | ((base)))
226 #define GEN_MODRM(mod, reg, rm) \
227 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
229 static iflag_t cpu; /* cpu level received from nasm.c */
231 static int64_t calcsize(int32_t, int64_t, int, insn *,
232 const struct itemplate *);
233 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
234 static void gencode(struct out_data *data, insn *ins);
235 static enum match_result find_match(const struct itemplate **tempp,
236 insn *instruction,
237 int32_t segment, int64_t offset, int bits);
238 static enum match_result matches(const struct itemplate *, insn *, int bits);
239 static opflags_t regflag(const operand *);
240 static int32_t regval(const operand *);
241 static int rexflags(int, opflags_t, int);
242 static int op_rexflags(const operand *, int);
243 static int op_evexflags(const operand *, int, uint8_t);
244 static void add_asp(insn *, int);
246 static enum ea_type process_ea(operand *, ea *, int, int, opflags_t, insn *);
248 static inline bool absolute_op(const struct operand *o)
250 return o->segment == NO_SEG && o->wrt == NO_SEG &&
251 !(o->opflags & OPFLAG_RELATIVE);
254 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
256 return ins->prefixes[pos] == prefix;
259 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
261 if (ins->prefixes[pos])
262 nasm_error(ERR_NONFATAL, "invalid %s prefix",
263 prefix_name(ins->prefixes[pos]));
266 static const char *size_name(int size)
268 switch (size) {
269 case 1:
270 return "byte";
271 case 2:
272 return "word";
273 case 4:
274 return "dword";
275 case 8:
276 return "qword";
277 case 10:
278 return "tword";
279 case 16:
280 return "oword";
281 case 32:
282 return "yword";
283 case 64:
284 return "zword";
285 default:
286 return "???";
290 static void warn_overflow(int pass, int size)
292 nasm_error(ERR_WARNING | pass | ERR_WARN_NOV,
293 "%s data exceeds bounds", size_name(size));
296 static void warn_overflow_const(int64_t data, int size)
298 if (overflow_general(data, size))
299 warn_overflow(ERR_PASS1, size);
302 static void warn_overflow_opd(const struct operand *o, int size)
304 if (absolute_op(o)) {
305 if (overflow_general(o->offset, size))
306 warn_overflow(ERR_PASS2, size);
310 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
312 bool err;
314 switch (sign) {
315 case OUT_WRAP:
316 err = overflow_general(data, size);
317 break;
318 case OUT_SIGNED:
319 err = overflow_signed(data, size);
320 break;
321 case OUT_UNSIGNED:
322 err = overflow_unsigned(data, size);
323 break;
324 default:
325 panic();
326 break;
329 if (err)
330 warn_overflow(ERR_PASS2, size);
334 * This routine wrappers the real output format's output routine,
335 * in order to pass a copy of the data off to the listing file
336 * generator at the same time, flatten unnecessary relocations,
337 * and verify backend compatibility.
339 static void out(struct out_data *data)
341 static int32_t lineno = 0; /* static!!! */
342 static const char *lnfname = NULL;
343 int asize;
344 const int amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
345 union {
346 uint8_t b[8];
347 uint64_t q;
348 } xdata;
349 uint64_t size = data->size;
350 int64_t addrval;
351 int32_t fixseg; /* Segment for which to produce fixed data */
353 if (!data->size)
354 return; /* Nothing to do */
357 * Convert addresses to RAWDATA if possible
358 * XXX: not all backends want this for global symbols!!!!
360 switch (data->type) {
361 case OUT_ADDRESS:
362 addrval = data->toffset;
363 fixseg = NO_SEG; /* Absolute address is fixed data */
364 goto address;
366 case OUT_RELADDR:
367 addrval = data->toffset - data->relbase;
368 fixseg = data->segment; /* Our own segment is fixed data */
369 goto address;
371 address:
372 asize = data->size;
373 nasm_assert(asize <= 8);
374 if (data->tsegment == fixseg && data->twrt == NO_SEG) {
375 uint8_t *q = xdata.b;
377 warn_overflow_out(addrval, asize, data->sign);
379 WRITEADDR(q, addrval, asize);
380 data->data = xdata.b;
381 data->type = OUT_RAWDATA;
382 asize = 0; /* No longer an address */
384 break;
386 default:
387 asize = 0; /* Not an address */
388 break;
391 lfmt->output(data);
394 * this call to src_get determines when we call the
395 * debug-format-specific "linenum" function
396 * it updates lineno and lnfname to the current values
397 * returning 0 if "same as last time", -2 if lnfname
398 * changed, and the amount by which lineno changed,
399 * if it did. thus, these variables must be static
402 if (src_get(&lineno, &lnfname))
403 dfmt->linenum(lnfname, lineno, data->segment);
405 if (asize && asize > amax) {
406 if (data->type != OUT_ADDRESS || data->sign == OUT_SIGNED) {
407 nasm_error(ERR_NONFATAL,
408 "%d-bit signed relocation unsupported by output format %s\n",
409 asize << 3, ofmt->shortname);
410 } else {
411 nasm_error(ERR_WARNING | ERR_WARN_ZEXTRELOC,
412 "%d-bit unsigned relocation zero-extended from %d bits\n",
413 asize << 3, ofmt->maxbits);
414 data->size = amax;
415 ofmt->output(data);
416 data->insoffs += amax;
417 data->offset += amax;
418 data->size = size = asize - amax;
420 data->data = zero_buffer;
421 data->type = OUT_RAWDATA;
424 ofmt->output(data);
425 data->offset += size;
426 data->insoffs += size;
429 static inline void out_rawdata(struct out_data *data, const void *rawdata,
430 size_t size)
432 data->type = OUT_RAWDATA;
433 data->data = rawdata;
434 data->size = size;
435 out(data);
438 static void out_rawbyte(struct out_data *data, uint8_t byte)
440 data->type = OUT_RAWDATA;
441 data->data = &byte;
442 data->size = 1;
443 out(data);
446 static inline void out_reserve(struct out_data *data, uint64_t size)
448 data->type = OUT_RESERVE;
449 data->size = size;
450 out(data);
453 static inline void out_imm(struct out_data *data, const struct operand *opx,
454 int size, enum out_sign sign)
456 data->type =
457 (opx->opflags & OPFLAG_RELATIVE) ? OUT_RELADDR : OUT_ADDRESS;
458 data->sign = sign;
459 data->size = size;
460 data->toffset = opx->offset;
461 data->tsegment = opx->segment;
462 data->twrt = opx->wrt;
464 * XXX: improve this if at some point in the future we can
465 * distinguish the subtrahend in expressions like [foo - bar]
466 * where bar is a symbol in the current segment. However, at the
467 * current point, if OPFLAG_RELATIVE is set that subtraction has
468 * already occurred.
470 data->relbase = 0;
471 out(data);
474 static void out_reladdr(struct out_data *data, const struct operand *opx,
475 int size)
477 if (opx->opflags & OPFLAG_RELATIVE)
478 nasm_error(ERR_NONFATAL, "invalid use of self-relative expression");
480 data->type = OUT_RELADDR;
481 data->sign = OUT_SIGNED;
482 data->size = size;
483 data->toffset = opx->offset;
484 data->tsegment = opx->segment;
485 data->twrt = opx->wrt;
486 data->relbase = data->offset + (data->inslen - data->insoffs);
487 out(data);
490 static inline void out_segment(struct out_data *data,
491 const struct operand *opx)
493 data->type = OUT_SEGMENT;
494 data->sign = OUT_UNSIGNED;
495 data->size = 2;
496 data->toffset = opx->offset;
497 data->tsegment = ofmt->segbase(opx->segment + 1);
498 data->twrt = opx->wrt;
499 out(data);
502 static bool jmp_match(int32_t segment, int64_t offset, int bits,
503 insn * ins, const struct itemplate *temp)
505 int64_t isize;
506 const uint8_t *code = temp->code;
507 uint8_t c = code[0];
508 bool is_byte;
510 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
511 return false;
512 if (!optimizing)
513 return false;
514 if (optimizing < 0 && c == 0371)
515 return false;
517 isize = calcsize(segment, offset, bits, ins, temp);
519 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
520 /* Be optimistic in pass 1 */
521 return true;
523 if (ins->oprs[0].segment != segment)
524 return false;
526 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
527 is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */
529 if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
530 /* jmp short (opcode eb) cannot be used with bnd prefix. */
531 ins->prefixes[PPS_REP] = P_none;
532 nasm_error(ERR_WARNING | ERR_WARN_BND | ERR_PASS2 ,
533 "jmp short does not init bnd regs - bnd prefix dropped.");
536 return is_byte;
539 /* This is totally just a wild guess what is reasonable... */
540 #define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
542 int64_t assemble(int32_t segment, int64_t start, int bits, iflag_t cp,
543 insn * instruction)
545 struct out_data data;
546 const struct itemplate *temp;
547 enum match_result m;
548 int32_t itimes;
549 int64_t wsize; /* size for DB etc. */
551 cpu = cp;
553 nasm_zero(&data);
554 data.offset = start;
555 data.segment = segment;
556 data.itemp = NULL;
557 data.sign = OUT_WRAP;
558 data.bits = bits;
560 wsize = idata_bytes(instruction->opcode);
561 if (wsize == -1)
562 return 0;
564 if (wsize) {
565 extop *e;
566 int32_t t = instruction->times;
567 if (t < 0)
568 nasm_panic(0, "instruction->times < 0 (%"PRId32") in assemble()", t);
570 while (t--) { /* repeat TIMES times */
571 list_for_each(e, instruction->eops) {
572 if (e->type == EOT_DB_NUMBER) {
573 if (wsize > 8) {
574 nasm_error(ERR_NONFATAL,
575 "integer supplied to a DT, DO or DY"
576 " instruction");
577 } else {
578 data.insoffs = 0;
579 data.type = e->relative ? OUT_RELADDR : OUT_ADDRESS;
580 data.inslen = data.size = wsize;
581 data.toffset = e->offset;
582 data.tsegment = e->segment;
583 data.twrt = e->wrt;
584 data.relbase = 0;
585 out(&data);
587 } else if (e->type == EOT_DB_STRING ||
588 e->type == EOT_DB_STRING_FREE) {
589 int align = e->stringlen % wsize;
590 if (align)
591 align = wsize - align;
593 data.insoffs = 0;
594 data.inslen = e->stringlen + align;
596 out_rawdata(&data, e->stringval, e->stringlen);
597 out_rawdata(&data, zero_buffer, align);
600 if (t > 0 && t == instruction->times - 1) {
601 lfmt->set_offset(data.offset);
602 lfmt->uplevel(LIST_TIMES);
605 if (instruction->times > 1)
606 lfmt->downlevel(LIST_TIMES);
607 } else if (instruction->opcode == I_INCBIN) {
608 const char *fname = instruction->eops->stringval;
609 FILE *fp;
610 size_t t = instruction->times;
611 off_t base = 0;
612 off_t len;
613 const void *map = NULL;
614 char *buf = NULL;
615 size_t blk = 0; /* Buffered I/O block size */
616 size_t m = 0; /* Bytes last read */
618 fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
619 if (!fp) {
620 nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
621 fname);
622 goto done;
625 len = nasm_file_size(fp);
627 if (len == (off_t)-1) {
628 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
629 fname);
630 goto close_done;
633 if (instruction->eops->next) {
634 base = instruction->eops->next->offset;
635 if (base >= len) {
636 len = 0;
637 } else {
638 len -= base;
639 if (instruction->eops->next->next &&
640 len > (off_t)instruction->eops->next->next->offset)
641 len = (off_t)instruction->eops->next->next->offset;
645 lfmt->set_offset(data.offset);
646 lfmt->uplevel(LIST_INCBIN);
648 if (!len)
649 goto end_incbin;
651 /* Try to map file data */
652 map = nasm_map_file(fp, base, len);
653 if (!map) {
654 blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
655 buf = nasm_malloc(blk);
658 while (t--) {
660 * Consider these irrelevant for INCBIN, since it is fully
661 * possible that these might be (way) bigger than an int
662 * can hold; there is, however, no reason to widen these
663 * types just for INCBIN. data.inslen == 0 signals to the
664 * backend that these fields are meaningless, if at all
665 * needed.
667 data.insoffs = 0;
668 data.inslen = 0;
670 if (map) {
671 out_rawdata(&data, map, len);
672 } else if ((off_t)m == len) {
673 out_rawdata(&data, buf, len);
674 } else {
675 off_t l = len;
677 if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
678 nasm_error(ERR_NONFATAL,
679 "`incbin': unable to seek on file `%s'",
680 fname);
681 goto end_incbin;
683 while (l > 0) {
684 m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
685 if (!m || feof(fp)) {
687 * This shouldn't happen unless the file
688 * actually changes while we are reading
689 * it.
691 nasm_error(ERR_NONFATAL,
692 "`incbin': unexpected EOF while"
693 " reading file `%s'", fname);
694 goto end_incbin;
696 out_rawdata(&data, buf, m);
697 l -= m;
701 end_incbin:
702 lfmt->downlevel(LIST_INCBIN);
703 if (instruction->times > 1) {
704 lfmt->set_offset(data.offset);
705 lfmt->uplevel(LIST_TIMES);
706 lfmt->downlevel(LIST_TIMES);
708 if (ferror(fp)) {
709 nasm_error(ERR_NONFATAL,
710 "`incbin': error while"
711 " reading file `%s'", fname);
713 close_done:
714 if (buf)
715 nasm_free(buf);
716 if (map)
717 nasm_unmap_file(map, len);
718 fclose(fp);
719 done:
721 } else {
722 /* "Real" instruction */
724 /* Check to see if we need an address-size prefix */
725 add_asp(instruction, bits);
727 m = find_match(&temp, instruction, data.segment, data.offset, bits);
729 if (m == MOK_GOOD) {
730 /* Matches! */
731 int64_t insn_size = calcsize(data.segment, data.offset,
732 bits, instruction, temp);
733 itimes = instruction->times;
734 if (insn_size < 0) /* shouldn't be, on pass two */
735 nasm_panic(0, "errors made it through from pass one");
737 data.itemp = temp;
738 data.bits = bits;
740 while (itimes--) {
741 data.insoffs = 0;
742 data.inslen = insn_size;
744 gencode(&data, instruction);
745 nasm_assert(data.insoffs == insn_size);
747 if (itimes > 0 && itimes == instruction->times - 1) {
748 lfmt->set_offset(data.offset);
749 lfmt->uplevel(LIST_TIMES);
752 if (instruction->times > 1)
753 lfmt->downlevel(LIST_TIMES);
754 } else {
755 /* No match */
756 switch (m) {
757 case MERR_OPSIZEMISSING:
758 nasm_error(ERR_NONFATAL, "operation size not specified");
759 break;
760 case MERR_OPSIZEMISMATCH:
761 nasm_error(ERR_NONFATAL, "mismatch in operand sizes");
762 break;
763 case MERR_BRNUMMISMATCH:
764 nasm_error(ERR_NONFATAL,
765 "mismatch in the number of broadcasting elements");
766 break;
767 case MERR_BADCPU:
768 nasm_error(ERR_NONFATAL, "no instruction for this cpu level");
769 break;
770 case MERR_BADMODE:
771 nasm_error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
772 bits);
773 break;
774 case MERR_ENCMISMATCH:
775 nasm_error(ERR_NONFATAL, "specific encoding scheme not available");
776 break;
777 case MERR_BADBND:
778 nasm_error(ERR_NONFATAL, "bnd prefix is not allowed");
779 break;
780 case MERR_BADREPNE:
781 nasm_error(ERR_NONFATAL, "%s prefix is not allowed",
782 (has_prefix(instruction, PPS_REP, P_REPNE) ?
783 "repne" : "repnz"));
784 break;
785 default:
786 nasm_error(ERR_NONFATAL,
787 "invalid combination of opcode and operands");
788 break;
792 return data.offset - start;
795 int64_t insn_size(int32_t segment, int64_t offset, int bits, iflag_t cp,
796 insn * instruction)
798 const struct itemplate *temp;
799 enum match_result m;
801 cpu = cp;
803 if (instruction->opcode == I_none)
804 return 0;
806 if (instruction->opcode == I_DB || instruction->opcode == I_DW ||
807 instruction->opcode == I_DD || instruction->opcode == I_DQ ||
808 instruction->opcode == I_DT || instruction->opcode == I_DO ||
809 instruction->opcode == I_DY) {
810 extop *e;
811 int32_t isize, osize, wsize;
813 isize = 0;
814 wsize = idata_bytes(instruction->opcode);
816 list_for_each(e, instruction->eops) {
817 int32_t align;
819 osize = 0;
820 if (e->type == EOT_DB_NUMBER) {
821 osize = 1;
822 warn_overflow_const(e->offset, wsize);
823 } else if (e->type == EOT_DB_STRING ||
824 e->type == EOT_DB_STRING_FREE)
825 osize = e->stringlen;
827 align = (-osize) % wsize;
828 if (align < 0)
829 align += wsize;
830 isize += osize + align;
832 return isize;
835 if (instruction->opcode == I_INCBIN) {
836 const char *fname = instruction->eops->stringval;
837 off_t len;
839 len = nasm_file_size_by_path(fname);
840 if (len == (off_t)-1) {
841 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
842 fname);
843 return 0;
846 if (instruction->eops->next) {
847 if (len <= (off_t)instruction->eops->next->offset) {
848 len = 0;
849 } else {
850 len -= instruction->eops->next->offset;
851 if (instruction->eops->next->next &&
852 len > (off_t)instruction->eops->next->next->offset) {
853 len = (off_t)instruction->eops->next->next->offset;
858 return len;
861 /* Check to see if we need an address-size prefix */
862 add_asp(instruction, bits);
864 m = find_match(&temp, instruction, segment, offset, bits);
865 if (m == MOK_GOOD) {
866 /* we've matched an instruction. */
867 return calcsize(segment, offset, bits, instruction, temp);
868 } else {
869 return -1; /* didn't match any instruction */
873 static void bad_hle_warn(const insn * ins, uint8_t hleok)
875 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
876 enum whatwarn { w_none, w_lock, w_inval } ww;
877 static const enum whatwarn warn[2][4] =
879 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
880 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
882 unsigned int n;
884 n = (unsigned int)rep_pfx - P_XACQUIRE;
885 if (n > 1)
886 return; /* Not XACQUIRE/XRELEASE */
888 ww = warn[n][hleok];
889 if (!is_class(MEMORY, ins->oprs[0].type))
890 ww = w_inval; /* HLE requires operand 0 to be memory */
892 switch (ww) {
893 case w_none:
894 break;
896 case w_lock:
897 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
898 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
899 "%s with this instruction requires lock",
900 prefix_name(rep_pfx));
902 break;
904 case w_inval:
905 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
906 "%s invalid with this instruction",
907 prefix_name(rep_pfx));
908 break;
912 /* Common construct */
913 #define case3(x) case (x): case (x)+1: case (x)+2
914 #define case4(x) case3(x): case (x)+3
916 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
917 insn * ins, const struct itemplate *temp)
919 const uint8_t *codes = temp->code;
920 int64_t length = 0;
921 uint8_t c;
922 int rex_mask = ~0;
923 int op1, op2;
924 struct operand *opx;
925 uint8_t opex = 0;
926 enum ea_type eat;
927 uint8_t hleok = 0;
928 bool lockcheck = true;
929 enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
931 ins->rex = 0; /* Ensure REX is reset */
932 eat = EA_SCALAR; /* Expect a scalar EA */
933 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
935 if (ins->prefixes[PPS_OSIZE] == P_O64)
936 ins->rex |= REX_W;
938 (void)segment; /* Don't warn that this parameter is unused */
939 (void)offset; /* Don't warn that this parameter is unused */
941 while (*codes) {
942 c = *codes++;
943 op1 = (c & 3) + ((opex & 1) << 2);
944 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
945 opx = &ins->oprs[op1];
946 opex = 0; /* For the next iteration */
948 switch (c) {
949 case4(01):
950 codes += c, length += c;
951 break;
953 case3(05):
954 opex = c;
955 break;
957 case4(010):
958 ins->rex |=
959 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
960 codes++, length++;
961 break;
963 case4(014):
964 /* this is an index reg of MIB operand */
965 mib_index = opx->basereg;
966 break;
968 case4(020):
969 case4(024):
970 length++;
971 break;
973 case4(030):
974 length += 2;
975 break;
977 case4(034):
978 if (opx->type & (BITS16 | BITS32 | BITS64))
979 length += (opx->type & BITS16) ? 2 : 4;
980 else
981 length += (bits == 16) ? 2 : 4;
982 break;
984 case4(040):
985 length += 4;
986 break;
988 case4(044):
989 length += ins->addr_size >> 3;
990 break;
992 case4(050):
993 length++;
994 break;
996 case4(054):
997 length += 8; /* MOV reg64/imm */
998 break;
1000 case4(060):
1001 length += 2;
1002 break;
1004 case4(064):
1005 if (opx->type & (BITS16 | BITS32 | BITS64))
1006 length += (opx->type & BITS16) ? 2 : 4;
1007 else
1008 length += (bits == 16) ? 2 : 4;
1009 break;
1011 case4(070):
1012 length += 4;
1013 break;
1015 case4(074):
1016 length += 2;
1017 break;
1019 case 0172:
1020 case 0173:
1021 codes++;
1022 length++;
1023 break;
1025 case4(0174):
1026 length++;
1027 break;
1029 case4(0240):
1030 ins->rex |= REX_EV;
1031 ins->vexreg = regval(opx);
1032 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
1033 ins->vex_cm = *codes++;
1034 ins->vex_wlp = *codes++;
1035 ins->evex_tuple = (*codes++ - 0300);
1036 break;
1038 case 0250:
1039 ins->rex |= REX_EV;
1040 ins->vexreg = 0;
1041 ins->vex_cm = *codes++;
1042 ins->vex_wlp = *codes++;
1043 ins->evex_tuple = (*codes++ - 0300);
1044 break;
1046 case4(0254):
1047 length += 4;
1048 break;
1050 case4(0260):
1051 ins->rex |= REX_V;
1052 ins->vexreg = regval(opx);
1053 ins->vex_cm = *codes++;
1054 ins->vex_wlp = *codes++;
1055 break;
1057 case 0270:
1058 ins->rex |= REX_V;
1059 ins->vexreg = 0;
1060 ins->vex_cm = *codes++;
1061 ins->vex_wlp = *codes++;
1062 break;
1064 case3(0271):
1065 hleok = c & 3;
1066 break;
1068 case4(0274):
1069 length++;
1070 break;
1072 case4(0300):
1073 break;
1075 case 0310:
1076 if (bits == 64)
1077 return -1;
1078 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
1079 break;
1081 case 0311:
1082 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1083 break;
1085 case 0312:
1086 break;
1088 case 0313:
1089 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1090 has_prefix(ins, PPS_ASIZE, P_A32))
1091 return -1;
1092 break;
1094 case4(0314):
1095 break;
1097 case 0320:
1099 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1100 if (pfx == P_O16)
1101 break;
1102 if (pfx != P_none)
1103 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1104 else
1105 ins->prefixes[PPS_OSIZE] = P_O16;
1106 break;
1109 case 0321:
1111 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1112 if (pfx == P_O32)
1113 break;
1114 if (pfx != P_none)
1115 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1116 else
1117 ins->prefixes[PPS_OSIZE] = P_O32;
1118 break;
1121 case 0322:
1122 break;
1124 case 0323:
1125 rex_mask &= ~REX_W;
1126 break;
1128 case 0324:
1129 ins->rex |= REX_W;
1130 break;
1132 case 0325:
1133 ins->rex |= REX_NH;
1134 break;
1136 case 0326:
1137 break;
1139 case 0330:
1140 codes++, length++;
1141 break;
1143 case 0331:
1144 break;
1146 case 0332:
1147 case 0333:
1148 length++;
1149 break;
1151 case 0334:
1152 ins->rex |= REX_L;
1153 break;
1155 case 0335:
1156 break;
1158 case 0336:
1159 if (!ins->prefixes[PPS_REP])
1160 ins->prefixes[PPS_REP] = P_REP;
1161 break;
1163 case 0337:
1164 if (!ins->prefixes[PPS_REP])
1165 ins->prefixes[PPS_REP] = P_REPNE;
1166 break;
1168 case 0340:
1169 if (!absolute_op(&ins->oprs[0]))
1170 nasm_error(ERR_NONFATAL, "attempt to reserve non-constant"
1171 " quantity of BSS space");
1172 else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
1173 nasm_error(ERR_WARNING | ERR_PASS1,
1174 "forward reference in RESx can have unpredictable results");
1175 else
1176 length += ins->oprs[0].offset;
1177 break;
1179 case 0341:
1180 if (!ins->prefixes[PPS_WAIT])
1181 ins->prefixes[PPS_WAIT] = P_WAIT;
1182 break;
1184 case 0360:
1185 break;
1187 case 0361:
1188 length++;
1189 break;
1191 case 0364:
1192 case 0365:
1193 break;
1195 case 0366:
1196 case 0367:
1197 length++;
1198 break;
1200 case 0370:
1201 case 0371:
1202 break;
1204 case 0373:
1205 length++;
1206 break;
1208 case 0374:
1209 eat = EA_XMMVSIB;
1210 break;
1212 case 0375:
1213 eat = EA_YMMVSIB;
1214 break;
1216 case 0376:
1217 eat = EA_ZMMVSIB;
1218 break;
1220 case4(0100):
1221 case4(0110):
1222 case4(0120):
1223 case4(0130):
1224 case4(0200):
1225 case4(0204):
1226 case4(0210):
1227 case4(0214):
1228 case4(0220):
1229 case4(0224):
1230 case4(0230):
1231 case4(0234):
1233 ea ea_data;
1234 int rfield;
1235 opflags_t rflags;
1236 struct operand *opy = &ins->oprs[op2];
1237 struct operand *op_er_sae;
1239 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1241 if (c <= 0177) {
1242 /* pick rfield from operand b (opx) */
1243 rflags = regflag(opx);
1244 rfield = nasm_regvals[opx->basereg];
1245 } else {
1246 rflags = 0;
1247 rfield = c & 7;
1250 /* EVEX.b1 : evex_brerop contains the operand position */
1251 op_er_sae = (ins->evex_brerop >= 0 ?
1252 &ins->oprs[ins->evex_brerop] : NULL);
1254 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1255 /* set EVEX.b */
1256 ins->evex_p[2] |= EVEX_P2B;
1257 if (op_er_sae->decoflags & ER) {
1258 /* set EVEX.RC (rounding control) */
1259 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1260 & EVEX_P2RC;
1262 } else {
1263 /* set EVEX.L'L (vector length) */
1264 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1265 ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
1266 if (opy->decoflags & BRDCAST_MASK) {
1267 /* set EVEX.b */
1268 ins->evex_p[2] |= EVEX_P2B;
1272 if (itemp_has(temp, IF_MIB)) {
1273 opy->eaflags |= EAF_MIB;
1275 * if a separate form of MIB (ICC style) is used,
1276 * the index reg info is merged into mem operand
1278 if (mib_index != R_none) {
1279 opy->indexreg = mib_index;
1280 opy->scale = 1;
1281 opy->hintbase = mib_index;
1282 opy->hinttype = EAH_NOTBASE;
1286 if (process_ea(opy, &ea_data, bits,
1287 rfield, rflags, ins) != eat) {
1288 nasm_error(ERR_NONFATAL, "invalid effective address");
1289 return -1;
1290 } else {
1291 ins->rex |= ea_data.rex;
1292 length += ea_data.size;
1295 break;
1297 default:
1298 nasm_panic(0, "internal instruction table corrupt"
1299 ": instruction code \\%o (0x%02X) given", c, c);
1300 break;
1304 ins->rex &= rex_mask;
1306 if (ins->rex & REX_NH) {
1307 if (ins->rex & REX_H) {
1308 nasm_error(ERR_NONFATAL, "instruction cannot use high registers");
1309 return -1;
1311 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1314 switch (ins->prefixes[PPS_VEX]) {
1315 case P_EVEX:
1316 if (!(ins->rex & REX_EV))
1317 return -1;
1318 break;
1319 case P_VEX3:
1320 case P_VEX2:
1321 if (!(ins->rex & REX_V))
1322 return -1;
1323 break;
1324 default:
1325 break;
1328 if (ins->rex & (REX_V | REX_EV)) {
1329 int bad32 = REX_R|REX_W|REX_X|REX_B;
1331 if (ins->rex & REX_H) {
1332 nasm_error(ERR_NONFATAL, "cannot use high register in AVX instruction");
1333 return -1;
1335 switch (ins->vex_wlp & 060) {
1336 case 000:
1337 case 040:
1338 ins->rex &= ~REX_W;
1339 break;
1340 case 020:
1341 ins->rex |= REX_W;
1342 bad32 &= ~REX_W;
1343 break;
1344 case 060:
1345 /* Follow REX_W */
1346 break;
1349 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1350 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1351 return -1;
1352 } else if (!(ins->rex & REX_EV) &&
1353 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1354 nasm_error(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1355 return -1;
1357 if (ins->rex & REX_EV)
1358 length += 4;
1359 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1360 ins->prefixes[PPS_VEX] == P_VEX3)
1361 length += 3;
1362 else
1363 length += 2;
1364 } else if (ins->rex & REX_MASK) {
1365 if (ins->rex & REX_H) {
1366 nasm_error(ERR_NONFATAL, "cannot use high register in rex instruction");
1367 return -1;
1368 } else if (bits == 64) {
1369 length++;
1370 } else if ((ins->rex & REX_L) &&
1371 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1372 iflag_ffs(&cpu) >= IF_X86_64) {
1373 /* LOCK-as-REX.R */
1374 assert_no_prefix(ins, PPS_LOCK);
1375 lockcheck = false; /* Already errored, no need for warning */
1376 length++;
1377 } else {
1378 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1379 return -1;
1383 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1384 (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1385 nasm_error(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1386 "instruction is not lockable");
1389 bad_hle_warn(ins, hleok);
1392 * when BND prefix is set by DEFAULT directive,
1393 * BND prefix is added to every appropriate instruction line
1394 * unless it is overridden by NOBND prefix.
1396 if (globalbnd &&
1397 (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
1398 ins->prefixes[PPS_REP] = P_BND;
1401 * Add length of legacy prefixes
1403 length += emit_prefix(NULL, bits, ins);
1405 return length;
1408 static inline void emit_rex(struct out_data *data, insn *ins)
1410 if (data->bits == 64) {
1411 if ((ins->rex & REX_MASK) &&
1412 !(ins->rex & (REX_V | REX_EV)) &&
1413 !ins->rex_done) {
1414 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1415 out_rawbyte(data, rex);
1416 ins->rex_done = true;
/*
 * Emit the legacy (pre-opcode) prefixes of an instruction: WAIT,
 * LOCK, the REP family, segment overrides, and address/operand size
 * overrides.  If data is NULL nothing is written and the function
 * only counts bytes; this is how the sizing pass accounts for
 * prefix length.
 *
 * Returns the number of prefix bytes emitted (or that would be
 * emitted).
 */
static int emit_prefix(struct out_data *data, const int bits, insn *ins)
{
    int bytes = 0;
    int j;

    for (j = 0; j < MAXPREFIX; j++) {
        uint8_t c = 0;          /* 0 = no byte for this prefix slot */
        switch (ins->prefixes[j]) {
        case P_WAIT:
            c = 0x9B;
            break;
        case P_LOCK:
            c = 0xF0;
            break;
        case P_REPNE:
        case P_REPNZ:
        case P_XACQUIRE:
        case P_BND:
            c = 0xF2;
            break;
        case P_REPE:
        case P_REPZ:
        case P_REP:
        case P_XRELEASE:
            c = 0xF3;
            break;
        /*
         * Segment overrides: CS/DS/ES/SS are emitted but warned
         * about in 64-bit mode, where the CPU ignores them.
         */
        case R_CS:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "cs segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x2E;
            break;
        case R_DS:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "ds segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x3E;
            break;
        case R_ES:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "es segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x26;
            break;
        case R_FS:
            c = 0x64;
            break;
        case R_GS:
            c = 0x65;
            break;
        case R_SS:
            if (bits == 64) {
                nasm_error(ERR_WARNING | ERR_PASS2,
                           "ss segment base generated, but will be ignored in 64-bit mode");
            }
            c = 0x36;
            break;
        case R_SEGR6:
        case R_SEGR7:
            nasm_error(ERR_NONFATAL,
                       "segr6 and segr7 cannot be used as prefixes");
            break;
        /*
         * Address-size overrides: 0x67 is only emitted when the
         * requested size differs from the current default.
         */
        case P_A16:
            if (bits == 64) {
                nasm_error(ERR_NONFATAL,
                           "16-bit addressing is not supported "
                           "in 64-bit mode");
            } else if (bits != 16)
                c = 0x67;
            break;
        case P_A32:
            if (bits != 32)
                c = 0x67;
            break;
        case P_A64:
            if (bits != 64) {
                nasm_error(ERR_NONFATAL,
                           "64-bit addressing is only supported "
                           "in 64-bit mode");
            }
            break;
        case P_ASP:
            c = 0x67;           /* unconditional address-size prefix */
            break;
        /* Operand-size overrides: 0x66 when it differs from default. */
        case P_O16:
            if (bits != 16)
                c = 0x66;
            break;
        case P_O32:
            if (bits == 16)
                c = 0x66;
            break;
        case P_O64:
            /* REX.W */
            break;
        case P_OSP:
            c = 0x66;           /* unconditional operand-size prefix */
            break;
        /* Encoding-selection pseudo-prefixes produce no bytes here. */
        case P_EVEX:
        case P_VEX3:
        case P_VEX2:
        case P_NOBND:
        case P_none:
            break;
        default:
            nasm_panic(0, "invalid instruction prefix");
        }
        if (c) {
            if (data)
                out_rawbyte(data, c);
            bytes++;
        }
    }
    return bytes;
}
/*
 * Emit the machine code for one instruction whose template has
 * already been chosen (data->itemp).  This is the code-emission
 * counterpart of the sizing pass: it interprets the template's
 * bytecode and writes prefixes, opcode bytes, ModR/M + SIB,
 * displacements and immediates through the out_* helpers.
 */
static void gencode(struct out_data *data, insn *ins)
{
    uint8_t c;
    uint8_t bytes[4];
    int64_t size;
    int op1, op2;
    struct operand *opx;
    const uint8_t *codes = data->itemp->code;
    uint8_t opex = 0;           /* pending operand-index extension (\5..\7) */
    enum ea_type eat = EA_SCALAR;
    int r;
    const int bits = data->bits;

    ins->rex_done = false;

    emit_prefix(data, bits, ins);

    while (*codes) {
        c = *codes++;
        /* Decode the two operand indices packed into the bytecode,
         * extended by a preceding \5..\7 marker if present. */
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0;               /* For the next iteration */

        switch (c) {
        case 01:
        case 02:
        case 03:
        case 04:
            /* Literal opcode bytes: REX (if any) must precede them. */
            emit_rex(data, ins);
            out_rawdata(data, codes, c);
            codes += c;
            break;

        case 05:
        case 06:
        case 07:
            opex = c;           /* extend operand indices of next bytecode */
            break;

        case4(010):
            /* Opcode byte with register number added into its low 3 bits. */
            emit_rex(data, ins);
            out_rawbyte(data, *codes++ + (regval(opx) & 7));
            break;

        case4(014):
            break;

        case4(020):
            if (opx->offset < -256 || opx->offset > 255)
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "byte value exceeds bounds");
            out_imm(data, opx, 1, OUT_WRAP);
            break;

        case4(024):
            if (opx->offset < 0 || opx->offset > 255)
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "unsigned byte value exceeds bounds");
            out_imm(data, opx, 1, OUT_UNSIGNED);
            break;

        case4(030):
            warn_overflow_opd(opx, 2);
            out_imm(data, opx, 2, OUT_WRAP);
            break;

        case4(034):
            /* Size taken from the operand if specified, else from mode. */
            if (opx->type & (BITS16 | BITS32))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;
            warn_overflow_opd(opx, size);
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(040):
            warn_overflow_opd(opx, 4);
            out_imm(data, opx, 4, OUT_WRAP);
            break;

        case4(044):
            size = ins->addr_size >> 3;
            warn_overflow_opd(opx, size);
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(050):
            /* Byte relative jump: range-check only if the target is in
             * the same segment, where the delta is known. */
            if (opx->segment == data->segment) {
                int64_t delta = opx->offset - data->offset
                    - (data->inslen - data->insoffs);
                if (delta > 127 || delta < -128)
                    nasm_error(ERR_NONFATAL, "short jump is out of range");
            }
            out_reladdr(data, opx, 1);
            break;

        case4(054):
            out_imm(data, opx, 8, OUT_WRAP);
            break;

        case4(060):
            out_reladdr(data, opx, 2);
            break;

        case4(064):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;

            out_reladdr(data, opx, size);
            break;

        case4(070):
            out_reladdr(data, opx, 4);
            break;

        case4(074):
            if (opx->segment == NO_SEG)
                nasm_error(ERR_NONFATAL, "value referenced by FAR is not"
                           " relocatable");
            out_segment(data, opx);
            break;

        case 0172:
        {
            /* is4: register in high nibble, immediate argument in the
             * low bits; the mask depends on EVEX vs VEX encoding. */
            int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
            const struct operand *opy;

            c = *codes++;
            opx = &ins->oprs[c >> 3];
            opy = &ins->oprs[c & 7];
            if (!absolute_op(opy)) {
                nasm_error(ERR_NONFATAL,
                           "non-absolute expression not permitted as argument %d",
                           c & 7);
            } else if (opy->offset & ~mask) {
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "is4 argument exceeds bounds");
            }
            c = opy->offset & mask;
            goto emit_is4;
        }

        case 0173:
            c = *codes++;
            opx = &ins->oprs[c >> 4];
            c &= 15;
            goto emit_is4;

        case4(0174):
            c = 0;
        emit_is4:
            r = nasm_regvals[opx->basereg];
            out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
            break;

        case4(0254):
            if (absolute_op(opx) &&
                (int32_t)opx->offset != (int64_t)opx->offset) {
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "signed dword immediate exceeds bounds");
            }
            out_imm(data, opx, 4, OUT_SIGNED);
            break;

        case4(0240):
        case 0250:
            /* Build and emit the 4-byte EVEX prefix (0x62 + P0..P2). */
            codes += 3;
            ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
                                           EVEX_P2Z | EVEX_P2AAA, 2);
            ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
            bytes[0] = 0x62;
            /* EVEX.X can be set by either REX or EVEX for different reasons */
            bytes[1] = ((((ins->rex & 7) << 5) |
                         (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
                (ins->vex_cm & EVEX_P0MM);
            bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
                ((~ins->vexreg & 15) << 3) |
                (1 << 2) | (ins->vex_wlp & 3);
            bytes[3] = ins->evex_p[2];
            out_rawdata(data, bytes, 4);
            break;

        case4(0260):
        case 0270:
            /* Build and emit the VEX prefix; the 3-byte form is needed
             * when the map, W/X/B bits or an explicit {vex3} demand it. */
            codes += 2;
            if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                ins->prefixes[PPS_VEX] == P_VEX3) {
                bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
                bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
                bytes[2] = ((ins->rex & REX_W) << (7-3)) |
                    ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 3);
            } else {
                bytes[0] = 0xc5;
                bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
                    ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 2);
            }
            break;

        case 0271:
        case 0272:
        case 0273:
            break;

        case4(0274):
        {
            /* Sign-extended byte immediate: warn if the value does not
             * fit either as a signed byte or in the operand size s. */
            uint64_t uv, um;
            int s;

            if (ins->rex & REX_W)
                s = 64;
            else if (ins->prefixes[PPS_OSIZE] == P_O16)
                s = 16;
            else if (ins->prefixes[PPS_OSIZE] == P_O32)
                s = 32;
            else
                s = bits;

            um = (uint64_t)2 << (s-1);
            uv = opx->offset;

            if (uv > 127 && uv < (uint64_t)-128 &&
                (uv < um-128 || uv > um-1)) {
                /* If this wasn't explicitly byte-sized, warn as though we
                 * had fallen through to the imm16/32/64 case.
                 */
                nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
                           "%s value exceeds bounds",
                           (opx->type & BITS8) ? "signed byte" :
                           s == 16 ? "word" :
                           s == 32 ? "dword" :
                           "signed dword");
            }
            out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
            break;
        }

        case4(0300):
            break;

        case 0310:
            if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
                out_rawbyte(data, 0x67);
            break;

        case 0311:
            if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
                out_rawbyte(data, 0x67);
            break;

        case 0312:
            break;

        case 0313:
            ins->rex = 0;
            break;

        case4(0314):
            break;

        case 0320:
        case 0321:
            break;

        case 0322:
        case 0323:
            break;

        case 0324:
            ins->rex |= REX_W;
            break;

        case 0325:
            break;

        case 0326:
            break;

        case 0330:
            /* Conditional opcode: XOR in the condition code bits. */
            out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            out_rawbyte(data, c - 0332 + 0xF2);
            break;

        case 0334:
            if (ins->rex & REX_R)
                out_rawbyte(data, 0xF0);
            ins->rex &= ~(REX_L|REX_R);
            break;

        case 0335:
            break;

        case 0336:
        case 0337:
            break;

        case 0340:
            if (ins->oprs[0].segment != NO_SEG)
                nasm_panic(0, "non-constant BSS size in pass two");

            out_reserve(data, ins->oprs[0].offset);
            break;

        case 0341:
            break;

        case 0360:
            break;

        case 0361:
            out_rawbyte(data, 0x66);
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            out_rawbyte(data, c - 0366 + 0x66);
            break;

        case3(0370):
            break;

        case 0373:
            out_rawbyte(data, bits == 16 ? 3 : 5);
            break;

        case 0374:
            eat = EA_XMMVSIB;
            break;

        case 0375:
            eat = EA_YMMVSIB;
            break;

        case 0376:
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* Effective address: emit ModR/M, optional SIB, then the
             * displacement (compressed, RIP-relative or plain). */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            uint8_t *p;
            struct operand *opy = &ins->oprs[op2];

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                /* rfield is constant */
                rflags = 0;
                rfield = c & 7;
            }

            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins) != eat)
                nasm_error(ERR_NONFATAL, "invalid effective address");

            p = bytes;
            *p++ = ea_data.modrm;
            if (ea_data.sib_present)
                *p++ = ea_data.sib;
            out_rawdata(data, bytes, p - bytes);

            /*
             * Make sure the address gets the right offset in case
             * the line breaks in the .lst file (BR 1197827)
             */

            if (ea_data.bytes) {
                /* use compressed displacement, if available */
                if (ea_data.disp8) {
                    out_rawbyte(data, ea_data.disp8);
                } else if (ea_data.rip) {
                    out_reladdr(data, opy, ea_data.bytes);
                } else {
                    int asize = ins->addr_size >> 3;

                    if (overflow_general(opy->offset, asize) ||
                        signed_bits(opy->offset, ins->addr_size) !=
                        signed_bits(opy->offset, ea_data.bytes << 3))
                        warn_overflow(ERR_PASS2, ea_data.bytes);

                    out_imm(data, opy, ea_data.bytes,
                            (asize > ea_data.bytes) ? OUT_SIGNED : OUT_UNSIGNED);
                }
            }
        }
        break;

        default:
            nasm_panic(0, "internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }
}
1966 static opflags_t regflag(const operand * o)
1968 if (!is_register(o->basereg))
1969 nasm_panic(0, "invalid operand passed to regflag()");
1970 return nasm_reg_flags[o->basereg];
1973 static int32_t regval(const operand * o)
1975 if (!is_register(o->basereg))
1976 nasm_panic(0, "invalid operand passed to regval()");
1977 return nasm_regvals[o->basereg];
1980 static int op_rexflags(const operand * o, int mask)
1982 opflags_t flags;
1983 int val;
1985 if (!is_register(o->basereg))
1986 nasm_panic(0, "invalid operand passed to op_rexflags()");
1988 flags = nasm_reg_flags[o->basereg];
1989 val = nasm_regvals[o->basereg];
1991 return rexflags(val, flags, mask);
1994 static int rexflags(int val, opflags_t flags, int mask)
1996 int rex = 0;
1998 if (val >= 0 && (val & 8))
1999 rex |= REX_B|REX_X|REX_R;
2000 if (flags & BITS64)
2001 rex |= REX_W;
2002 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
2003 rex |= REX_H;
2004 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
2005 rex |= REX_P;
2007 return rex & mask;
2010 static int evexflags(int val, decoflags_t deco,
2011 int mask, uint8_t byte)
2013 int evex = 0;
2015 switch (byte) {
2016 case 0:
2017 if (val >= 0 && (val & 16))
2018 evex |= (EVEX_P0RP | EVEX_P0X);
2019 break;
2020 case 2:
2021 if (val >= 0 && (val & 16))
2022 evex |= EVEX_P2VP;
2023 if (deco & Z)
2024 evex |= EVEX_P2Z;
2025 if (deco & OPMASK_MASK)
2026 evex |= deco & EVEX_P2AAA;
2027 break;
2029 return evex & mask;
2032 static int op_evexflags(const operand * o, int mask, uint8_t byte)
2034 int val;
2036 val = nasm_regvals[o->basereg];
2038 return evexflags(val, o->decoflags, mask, byte);
/*
 * Find the best-matching template for an instruction.  Templates are
 * scanned once; if the only failures were due to a missing operand
 * size, the operand sizes are "fuzzily" filled in from the candidate
 * templates and the scan is repeated.  On return *tempp points to
 * the matched (or last-tried) template and the best match result is
 * returned; MOK_GOOD means a usable template was found.
 */
static enum match_result find_match(const struct itemplate **tempp,
                                    insn *instruction,
                                    int32_t segment, int64_t offset, int bits)
{
    const struct itemplate *temp;
    enum match_result m, merr;
    opflags_t xsizeflags[MAX_OPERANDS];
    bool opsizemissing = false;
    int8_t broadcast = instruction->evex_brerop;
    int i;

    /* broadcasting uses a different data element size */
    for (i = 0; i < instruction->operands; i++)
        if (i == broadcast)
            xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
        else
            xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;

    merr = MERR_INVALOP;

    /* First pass: try each template as-is. */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            /* Jump templates match only if the displacement fits. */
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
            /*
             * Missing operand size and a candidate for fuzzy matching...
             */
            for (i = 0; i < temp->operands; i++)
                if (i == broadcast)
                    xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
                else
                    xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
            opsizemissing = true;
        }
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

    /* No match, but see if we can get a fuzzy operand size match... */
    if (!opsizemissing)
        goto done;

    for (i = 0; i < instruction->operands; i++) {
        /*
         * We ignore extrinsic operand sizes on registers, so we should
         * never try to fuzzy-match on them.  This also resolves the case
         * when we have e.g. "xmmrm128" in two different positions.
         */
        if (is_class(REGISTER, instruction->oprs[i].type))
            continue;

        /* This tests if xsizeflags[i] has more than one bit set */
        if ((xsizeflags[i] & (xsizeflags[i]-1)))
            goto done;          /* No luck */

        if (i == broadcast) {
            instruction->oprs[i].decoflags |= xsizeflags[i];
            instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
                                          BITS32 : BITS64);
        } else {
            instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
        }
    }

    /* Try matching again... */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        }
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

done:
    *tempp = temp;
    return merr;
}
/*
 * Compute the broadcast repeat count {1to<n>} for an operand of
 * total size given by opflags when each broadcast element has size
 * brsize (BITS32 or BITS64).  Fatal error if the element size
 * exceeds 64 bits.
 */
static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
{
    unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
    uint8_t brcast_num;

    if (brsize > BITS64)
        nasm_error(ERR_FATAL,
                   "size of broadcasting element is greater than 64 bits");

    /*
     * The shift term is to take care of the extra BITS80 inserted
     * between BITS64 and BITS128.
     */
    brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
        >> (opsize > (BITS64 >> SIZE_SHIFT));

    return brcast_num;
}
/*
 * Decide whether one instruction template fits the instruction the
 * user wrote.  Checks, in order: opcode, operand count, optimizer
 * gating, requested {vex}/{evex} encoding, spurious COLON/TO flags,
 * implied operand sizes (IF_S* flags), per-operand type/size and
 * broadcast decorators, size-match (SM/SM2) constraints, CPU level,
 * long-mode legality, HLE and BND prefix legality, and finally
 * whether the template needs special jump handling.
 *
 * Returns MOK_GOOD, MOK_JUMP, or the most specific MERR_* failure.
 */
static enum match_result matches(const struct itemplate *itemp,
                                 insn *instruction, int bits)
{
    opflags_t size[MAX_OPERANDS], asize;
    bool opsizemissing = false;
    int i, oprs;

    /*
     * Check the opcode
     */
    if (itemp->opcode != instruction->opcode)
        return MERR_INVALOP;

    /*
     * Count the operands
     */
    if (itemp->operands != instruction->operands)
        return MERR_INVALOP;

    /*
     * Is it legal?
     */
    if (!(optimizing > 0) && itemp_has(itemp, IF_OPT))
        return MERR_INVALOP;

    /*
     * {evex} available?
     */
    switch (instruction->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!itemp_has(itemp, IF_EVEX))
            return MERR_ENCMISMATCH;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!itemp_has(itemp, IF_VEX))
            return MERR_ENCMISMATCH;
        break;
    default:
        break;
    }

    /*
     * Check that no spurious colons or TOs are present
     */
    for (i = 0; i < itemp->operands; i++)
        if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
            return MERR_INVALOP;

    /*
     * Process size flags
     */
    switch (itemp_smask(itemp)) {
    case IF_GENBIT(IF_SB):
        asize = BITS8;
        break;
    case IF_GENBIT(IF_SW):
        asize = BITS16;
        break;
    case IF_GENBIT(IF_SD):
        asize = BITS32;
        break;
    case IF_GENBIT(IF_SQ):
        asize = BITS64;
        break;
    case IF_GENBIT(IF_SO):
        asize = BITS128;
        break;
    case IF_GENBIT(IF_SY):
        asize = BITS256;
        break;
    case IF_GENBIT(IF_SZ):
        asize = BITS512;
        break;
    case IF_GENBIT(IF_SIZE):
        /* Size taken from the current mode. */
        switch (bits) {
        case 16:
            asize = BITS16;
            break;
        case 32:
            asize = BITS32;
            break;
        case 64:
            asize = BITS64;
            break;
        default:
            asize = 0;
            break;
        }
        break;
    default:
        asize = 0;
        break;
    }

    if (itemp_armask(itemp)) {
        /* S- flags only apply to a specific operand */
        i = itemp_arg(itemp);
        memset(size, 0, sizeof size);
        size[i] = asize;
    } else {
        /* S- flags apply to all operands */
        for (i = 0; i < MAX_OPERANDS; i++)
            size[i] = asize;
    }

    /*
     * Check that the operand flags all match up,
     * it's a bit tricky so lets be verbose:
     *
     * 1) Find out the size of operand. If instruction
     *    doesn't have one specified -- we're trying to
     *    guess it either from template (IF_S* flag) or
     *    from code bits.
     *
     * 2) If template operand do not match the instruction OR
     *    template has an operand size specified AND this size differ
     *    from which instruction has (perhaps we got it from code bits)
     *    we are:
     *      a)  Check that only size of instruction and operand is differ
     *          other characteristics do match
     *      b)  Perhaps it's a register specified in instruction so
     *          for such a case we just mark that operand as "size
     *          missing" and this will turn on fuzzy operand size
     *          logic facility (handled by a caller)
     */
    for (i = 0; i < itemp->operands; i++) {
        opflags_t type = instruction->oprs[i].type;
        decoflags_t deco = instruction->oprs[i].decoflags;
        bool is_broadcast = deco & BRDCAST_MASK;
        uint8_t brcast_num = 0;
        opflags_t template_opsize, insn_opsize;

        if (!(type & SIZE_MASK))
            type |= size[i];

        insn_opsize = type & SIZE_MASK;
        if (!is_broadcast) {
            template_opsize = itemp->opd[i] & SIZE_MASK;
        } else {
            decoflags_t deco_brsize = itemp->deco[i] & BRSIZE_MASK;
            /*
             * when broadcasting, the element size depends on
             * the instruction type. decorator flag should match.
             */
            if (deco_brsize) {
                template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
                /* calculate the proper number : {1to<brcast_num>} */
                brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
            } else {
                template_opsize = 0;
            }
        }

        if ((itemp->opd[i] & ~type & ~SIZE_MASK) ||
            (deco & ~itemp->deco[i] & ~BRNUM_MASK)) {
            return MERR_INVALOP;
        } else if (template_opsize) {
            if (template_opsize != insn_opsize) {
                if (insn_opsize) {
                    return MERR_INVALOP;
                } else if (!is_class(REGISTER, type)) {
                    /*
                     * Note: we don't honor extrinsic operand sizes for registers,
                     * so "missing operand size" for a register should be
                     * considered a wildcard match rather than an error.
                     */
                    opsizemissing = true;
                }
            } else if (is_broadcast &&
                       (brcast_num !=
                        (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
                /*
                 * broadcasting opsize matches but the number of repeated memory
                 * element does not match.
                 * if 64b double precision float is broadcasted to ymm (256b),
                 * broadcasting decorator must be {1to4}.
                 */
                return MERR_BRNUMMISMATCH;
            }
        }
    }

    if (opsizemissing)
        return MERR_OPSIZEMISSING;

    /*
     * Check operand sizes
     */
    if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
        oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
        for (i = 0; i < oprs; i++) {
            asize = itemp->opd[i] & SIZE_MASK;
            if (asize) {
                for (i = 0; i < oprs; i++)
                    size[i] = asize;
                break;
            }
        }
    } else {
        oprs = itemp->operands;
    }

    for (i = 0; i < itemp->operands; i++) {
        if (!(itemp->opd[i] & SIZE_MASK) &&
            (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
            return MERR_OPSIZEMISMATCH;
    }

    /*
     * Check template is okay at the set cpu level
     */
    if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
        return MERR_BADCPU;

    /*
     * Verify the appropriate long mode flag.
     */
    if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
        return MERR_BADMODE;

    /*
     * If we have a HLE prefix, look for the NOHLE flag
     */
    if (itemp_has(itemp, IF_NOHLE) &&
        (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
         has_prefix(instruction, PPS_REP, P_XRELEASE)))
        return MERR_BADHLE;

    /*
     * Check if special handling needed for Jumps
     */
    if ((itemp->code[0] & ~1) == 0370)
        return MOK_JUMP;

    /*
     * Check if BND prefix is allowed.
     * Other 0xF2 (REPNE/REPNZ) prefix is prohibited.
     */
    if (!itemp_has(itemp, IF_BND) &&
        (has_prefix(instruction, PPS_REP, P_BND) ||
         has_prefix(instruction, PPS_REP, P_NOBND)))
        return MERR_BADBND;
    else if (itemp_has(itemp, IF_BND) &&
             (has_prefix(instruction, PPS_REP, P_REPNE) ||
              has_prefix(instruction, PPS_REP, P_REPNZ)))
        return MERR_BADREPNE;

    return MOK_GOOD;
}
2405 * Check if ModR/M.mod should/can be 01.
2406 * - EAF_BYTEOFFS is set
2407 * - offset can fit in a byte when EVEX is not used
2408 * - offset can be compressed when EVEX is used
2410 #define IS_MOD_01() (input->eaflags & EAF_BYTEOFFS || \
2411 (o >= -128 && o <= 127 && \
2412 seg == NO_SEG && !forw_ref && \
2413 !(input->eaflags & EAF_WORDOFFS) && \
2414 !(ins->rex & REX_EV)) || \
2415 (ins->rex & REX_EV && \
2416 is_disp8n(input, ins, &output->disp8)))
2418 static enum ea_type process_ea(operand *input, ea *output, int bits,
2419 int rfield, opflags_t rflags, insn *ins)
2421 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
2422 int addrbits = ins->addr_size;
2423 int eaflags = input->eaflags;
2425 output->type = EA_SCALAR;
2426 output->rip = false;
2427 output->disp8 = 0;
2429 /* REX flags for the rfield operand */
2430 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
2431 /* EVEX.R' flag for the REG operand */
2432 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
2434 if (is_class(REGISTER, input->type)) {
2436 * It's a direct register.
2438 if (!is_register(input->basereg))
2439 goto err;
2441 if (!is_reg_class(REG_EA, input->basereg))
2442 goto err;
2444 /* broadcasting is not available with a direct register operand. */
2445 if (input->decoflags & BRDCAST_MASK) {
2446 nasm_error(ERR_NONFATAL, "Broadcasting not allowed from a register");
2447 goto err;
2450 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
2451 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
2452 output->sib_present = false; /* no SIB necessary */
2453 output->bytes = 0; /* no offset necessary either */
2454 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
2455 } else {
2457 * It's a memory reference.
2460 /* Embedded rounding or SAE is not available with a mem ref operand. */
2461 if (input->decoflags & (ER | SAE)) {
2462 nasm_error(ERR_NONFATAL,
2463 "Embedded rounding is available only with reg-reg op.");
2464 return -1;
2467 if (input->basereg == -1 &&
2468 (input->indexreg == -1 || input->scale == 0)) {
2470 * It's a pure offset.
2472 if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
2473 if (input->segment == NO_SEG || (input->opflags & OPFLAG_RELATIVE)) {
2474 nasm_error(ERR_WARNING | ERR_PASS2, "absolute address can not be RIP-relative");
2475 input->type &= ~IP_REL;
2476 input->type |= MEMORY;
2480 if (bits == 64 &&
2481 !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
2482 nasm_error(ERR_NONFATAL, "RIP-relative addressing is prohibited for mib.");
2483 return -1;
2486 if (eaflags & EAF_BYTEOFFS ||
2487 (eaflags & EAF_WORDOFFS &&
2488 input->disp_size != (addrbits != 16 ? 32 : 16))) {
2489 nasm_error(ERR_WARNING | ERR_PASS1, "displacement size ignored on absolute address");
2492 if (bits == 64 && (~input->type & IP_REL)) {
2493 output->sib_present = true;
2494 output->sib = GEN_SIB(0, 4, 5);
2495 output->bytes = 4;
2496 output->modrm = GEN_MODRM(0, rfield, 4);
2497 output->rip = false;
2498 } else {
2499 output->sib_present = false;
2500 output->bytes = (addrbits != 16 ? 4 : 2);
2501 output->modrm = GEN_MODRM(0, rfield, (addrbits != 16 ? 5 : 6));
2502 output->rip = bits == 64;
2504 } else {
2506 * It's an indirection.
2508 int i = input->indexreg, b = input->basereg, s = input->scale;
2509 int32_t seg = input->segment;
2510 int hb = input->hintbase, ht = input->hinttype;
2511 int t, it, bt; /* register numbers */
2512 opflags_t x, ix, bx; /* register flags */
2514 if (s == 0)
2515 i = -1; /* make this easy, at least */
2517 if (is_register(i)) {
2518 it = nasm_regvals[i];
2519 ix = nasm_reg_flags[i];
2520 } else {
2521 it = -1;
2522 ix = 0;
2525 if (is_register(b)) {
2526 bt = nasm_regvals[b];
2527 bx = nasm_reg_flags[b];
2528 } else {
2529 bt = -1;
2530 bx = 0;
2533 /* if either one are a vector register... */
2534 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
2535 opflags_t sok = BITS32 | BITS64;
2536 int32_t o = input->offset;
2537 int mod, scale, index, base;
2540 * For a vector SIB, one has to be a vector and the other,
2541 * if present, a GPR. The vector must be the index operand.
2543 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
2544 if (s == 0)
2545 s = 1;
2546 else if (s != 1)
2547 goto err;
2549 t = bt, bt = it, it = t;
2550 x = bx, bx = ix, ix = x;
2553 if (bt != -1) {
2554 if (REG_GPR & ~bx)
2555 goto err;
2556 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2557 sok &= bx;
2558 else
2559 goto err;
2563 * While we're here, ensure the user didn't specify
2564 * WORD or QWORD
2566 if (input->disp_size == 16 || input->disp_size == 64)
2567 goto err;
2569 if (addrbits == 16 ||
2570 (addrbits == 32 && !(sok & BITS32)) ||
2571 (addrbits == 64 && !(sok & BITS64)))
2572 goto err;
2574 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2575 : ((ix & YMMREG & ~REG_EA)
2576 ? EA_YMMVSIB : EA_XMMVSIB));
2578 output->rex |= rexflags(it, ix, REX_X);
2579 output->rex |= rexflags(bt, bx, REX_B);
2580 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
2582 index = it & 7; /* it is known to be != -1 */
2584 switch (s) {
2585 case 1:
2586 scale = 0;
2587 break;
2588 case 2:
2589 scale = 1;
2590 break;
2591 case 4:
2592 scale = 2;
2593 break;
2594 case 8:
2595 scale = 3;
2596 break;
2597 default: /* then what the smeg is it? */
2598 goto err; /* panic */
2601 if (bt == -1) {
2602 base = 5;
2603 mod = 0;
2604 } else {
2605 base = (bt & 7);
2606 if (base != REG_NUM_EBP && o == 0 &&
2607 seg == NO_SEG && !forw_ref &&
2608 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2609 mod = 0;
2610 else if (IS_MOD_01())
2611 mod = 1;
2612 else
2613 mod = 2;
2616 output->sib_present = true;
2617 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2618 output->modrm = GEN_MODRM(mod, rfield, 4);
2619 output->sib = GEN_SIB(scale, index, base);
2620 } else if ((ix|bx) & (BITS32|BITS64)) {
2622 * it must be a 32/64-bit memory reference. Firstly we have
2623 * to check that all registers involved are type E/Rxx.
2625 opflags_t sok = BITS32 | BITS64;
2626 int32_t o = input->offset;
2628 if (it != -1) {
2629 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2630 sok &= ix;
2631 else
2632 goto err;
2635 if (bt != -1) {
2636 if (REG_GPR & ~bx)
2637 goto err; /* Invalid register */
2638 if (~sok & bx & SIZE_MASK)
2639 goto err; /* Invalid size */
2640 sok &= bx;
2644 * While we're here, ensure the user didn't specify
2645 * WORD or QWORD
2647 if (input->disp_size == 16 || input->disp_size == 64)
2648 goto err;
2650 if (addrbits == 16 ||
2651 (addrbits == 32 && !(sok & BITS32)) ||
2652 (addrbits == 64 && !(sok & BITS64)))
2653 goto err;
2655 /* now reorganize base/index */
2656 if (s == 1 && bt != it && bt != -1 && it != -1 &&
2657 ((hb == b && ht == EAH_NOTBASE) ||
2658 (hb == i && ht == EAH_MAKEBASE))) {
2659 /* swap if hints say so */
2660 t = bt, bt = it, it = t;
2661 x = bx, bx = ix, ix = x;
2664 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
2665 /* make single reg base, unless hint */
2666 bt = it, bx = ix, it = -1, ix = 0;
2668 if (eaflags & EAF_MIB) {
2669 /* only for mib operands */
2670 if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
2672 * make a single reg index [reg*1].
2673 * gas uses this form for an explicit index register.
2675 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2677 if ((ht == EAH_SUMMED) && bt == -1) {
2678 /* separate once summed index into [base, index] */
2679 bt = it, bx = ix, s--;
2681 } else {
2682 if (((s == 2 && it != REG_NUM_ESP &&
2683 (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
2684 s == 3 || s == 5 || s == 9) && bt == -1) {
2685 /* convert 3*EAX to EAX+2*EAX */
2686 bt = it, bx = ix, s--;
2688 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
2689 (eaflags & EAF_TIMESTWO) &&
2690 (hb == b && ht == EAH_NOTBASE)) {
2692 * convert [NOSPLIT EAX*1]
2693 * to sib format with 0x0 displacement - [EAX*1+0].
2695 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2698 if (s == 1 && it == REG_NUM_ESP) {
2699 /* swap ESP into base if scale is 1 */
2700 t = it, it = bt, bt = t;
2701 x = ix, ix = bx, bx = x;
2703 if (it == REG_NUM_ESP ||
2704 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
2705 goto err; /* wrong, for various reasons */
2707 output->rex |= rexflags(it, ix, REX_X);
2708 output->rex |= rexflags(bt, bx, REX_B);
2710 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
2711 /* no SIB needed */
2712 int mod, rm;
2714 if (bt == -1) {
2715 rm = 5;
2716 mod = 0;
2717 } else {
2718 rm = (bt & 7);
2719 if (rm != REG_NUM_EBP && o == 0 &&
2720 seg == NO_SEG && !forw_ref &&
2721 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2722 mod = 0;
2723 else if (IS_MOD_01())
2724 mod = 1;
2725 else
2726 mod = 2;
2729 output->sib_present = false;
2730 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2731 output->modrm = GEN_MODRM(mod, rfield, rm);
2732 } else {
2733 /* we need a SIB */
2734 int mod, scale, index, base;
2736 if (it == -1)
2737 index = 4, s = 1;
2738 else
2739 index = (it & 7);
2741 switch (s) {
2742 case 1:
2743 scale = 0;
2744 break;
2745 case 2:
2746 scale = 1;
2747 break;
2748 case 4:
2749 scale = 2;
2750 break;
2751 case 8:
2752 scale = 3;
2753 break;
2754 default: /* then what the smeg is it? */
2755 goto err; /* panic */
2758 if (bt == -1) {
2759 base = 5;
2760 mod = 0;
2761 } else {
2762 base = (bt & 7);
2763 if (base != REG_NUM_EBP && o == 0 &&
2764 seg == NO_SEG && !forw_ref &&
2765 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2766 mod = 0;
2767 else if (IS_MOD_01())
2768 mod = 1;
2769 else
2770 mod = 2;
2773 output->sib_present = true;
2774 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2775 output->modrm = GEN_MODRM(mod, rfield, 4);
2776 output->sib = GEN_SIB(scale, index, base);
2778 } else { /* it's 16-bit */
2779 int mod, rm;
2780 int16_t o = input->offset;
2782 /* check for 64-bit long mode */
2783 if (addrbits == 64)
2784 goto err;
2786 /* check all registers are BX, BP, SI or DI */
2787 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2788 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
2789 goto err;
2791 /* ensure the user didn't specify DWORD/QWORD */
2792 if (input->disp_size == 32 || input->disp_size == 64)
2793 goto err;
2795 if (s != 1 && i != -1)
2796 goto err; /* no can do, in 16-bit EA */
2797 if (b == -1 && i != -1) {
2798 int tmp = b;
2799 b = i;
2800 i = tmp;
2801 } /* swap */
2802 if ((b == R_SI || b == R_DI) && i != -1) {
2803 int tmp = b;
2804 b = i;
2805 i = tmp;
2807 /* have BX/BP as base, SI/DI index */
2808 if (b == i)
2809 goto err; /* shouldn't ever happen, in theory */
2810 if (i != -1 && b != -1 &&
2811 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
2812 goto err; /* invalid combinations */
2813 if (b == -1) /* pure offset: handled above */
2814 goto err; /* so if it gets to here, panic! */
2816 rm = -1;
2817 if (i != -1)
2818 switch (i * 256 + b) {
2819 case R_SI * 256 + R_BX:
2820 rm = 0;
2821 break;
2822 case R_DI * 256 + R_BX:
2823 rm = 1;
2824 break;
2825 case R_SI * 256 + R_BP:
2826 rm = 2;
2827 break;
2828 case R_DI * 256 + R_BP:
2829 rm = 3;
2830 break;
2831 } else
2832 switch (b) {
2833 case R_SI:
2834 rm = 4;
2835 break;
2836 case R_DI:
2837 rm = 5;
2838 break;
2839 case R_BP:
2840 rm = 6;
2841 break;
2842 case R_BX:
2843 rm = 7;
2844 break;
2846 if (rm == -1) /* can't happen, in theory */
2847 goto err; /* so panic if it does */
2849 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
2850 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2851 mod = 0;
2852 else if (IS_MOD_01())
2853 mod = 1;
2854 else
2855 mod = 2;
2857 output->sib_present = false; /* no SIB - it's 16-bit */
2858 output->bytes = mod; /* bytes of offset needed */
2859 output->modrm = GEN_MODRM(mod, rfield, rm);
2864 output->size = 1 + output->sib_present + output->bytes;
2865 return output->type;
2867 err:
2868 return output->type = EA_INVALID;
2871 static void add_asp(insn *ins, int addrbits)
2873 int j, valid;
2874 int defdisp;
2876 valid = (addrbits == 64) ? 64|32 : 32|16;
2878 switch (ins->prefixes[PPS_ASIZE]) {
2879 case P_A16:
2880 valid &= 16;
2881 break;
2882 case P_A32:
2883 valid &= 32;
2884 break;
2885 case P_A64:
2886 valid &= 64;
2887 break;
2888 case P_ASP:
2889 valid &= (addrbits == 32) ? 16 : 32;
2890 break;
2891 default:
2892 break;
2895 for (j = 0; j < ins->operands; j++) {
2896 if (is_class(MEMORY, ins->oprs[j].type)) {
2897 opflags_t i, b;
2899 /* Verify as Register */
2900 if (!is_register(ins->oprs[j].indexreg))
2901 i = 0;
2902 else
2903 i = nasm_reg_flags[ins->oprs[j].indexreg];
2905 /* Verify as Register */
2906 if (!is_register(ins->oprs[j].basereg))
2907 b = 0;
2908 else
2909 b = nasm_reg_flags[ins->oprs[j].basereg];
2911 if (ins->oprs[j].scale == 0)
2912 i = 0;
2914 if (!i && !b) {
2915 int ds = ins->oprs[j].disp_size;
2916 if ((addrbits != 64 && ds > 8) ||
2917 (addrbits == 64 && ds == 16))
2918 valid &= ds;
2919 } else {
2920 if (!(REG16 & ~b))
2921 valid &= 16;
2922 if (!(REG32 & ~b))
2923 valid &= 32;
2924 if (!(REG64 & ~b))
2925 valid &= 64;
2927 if (!(REG16 & ~i))
2928 valid &= 16;
2929 if (!(REG32 & ~i))
2930 valid &= 32;
2931 if (!(REG64 & ~i))
2932 valid &= 64;
2937 if (valid & addrbits) {
2938 ins->addr_size = addrbits;
2939 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2940 /* Add an address size prefix */
2941 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;;
2942 ins->addr_size = (addrbits == 32) ? 16 : 32;
2943 } else {
2944 /* Impossible... */
2945 nasm_error(ERR_NONFATAL, "impossible combination of address sizes");
2946 ins->addr_size = addrbits; /* Error recovery */
2949 defdisp = ins->addr_size == 16 ? 16 : 32;
2951 for (j = 0; j < ins->operands; j++) {
2952 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2953 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2955 * mem_offs sizes must match the address size; if not,
2956 * strip the MEM_OFFS bit and match only EA instructions
2958 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);