1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2017 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
35 * assemble.c code generation for the Netwide Assembler
37 * Bytecode specification
38 * ----------------------
41 * Codes Mnemonic Explanation
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
98 * [p0] pp = 0 for no prefix
 99  *                  [66]  pp = 1 for legacy prefix 66
100 * [f3] pp = 2
101 * [f2] pp = 3
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
119 * [w0] ww = 0 for W = 0
 120  *                  [w1]  ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
 148  *                  disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes an YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes an ZMM VSIB memory EA
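/*
 * Worked example (hypothetical, for illustration only; the authoritative
 * byte-code strings are generated from insns.dat by insns.pl): a template
 * string such as "\321\10\xB8\40" would read as "optional 0x66
 * operand-size prefix (o32), then the literal byte 0xB8 plus the register
 * value of operand 0, then a 32-bit immediate (id) taken from operand 0",
 * i.e. the classic B8+r id shape of MOV reg32,imm32.
 */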
178 #include "compiler.h"
180 #include <stdio.h>
181 #include <string.h>
182 #include <stdlib.h>
184 #include "nasm.h"
185 #include "nasmlib.h"
186 #include "error.h"
187 #include "assemble.h"
188 #include "insns.h"
189 #include "tables.h"
190 #include "disp8.h"
191 #include "listing.h"
193 enum match_result {
195 * Matching errors. These should be sorted so that more specific
196 * errors come later in the sequence.
198 MERR_INVALOP,
199 MERR_OPSIZEMISSING,
200 MERR_OPSIZEMISMATCH,
201 MERR_BRNOTHERE,
202 MERR_BRNUMMISMATCH,
203 MERR_MASKNOTHERE,
204 MERR_BADCPU,
205 MERR_BADMODE,
206 MERR_BADHLE,
207 MERR_ENCMISMATCH,
208 MERR_BADBND,
209 MERR_BADREPNE,
211 * Matching success; the conditional ones first
213 MOK_JUMP, /* Matching OK but needs jmp_match() */
214 MOK_GOOD /* Matching unconditionally OK */
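/*
 * find_match() keeps only the numerically largest result it sees while
 * scanning the templates, so the ordering of this enum is what makes the
 * most specific error win when different templates fail for different
 * reasons.
 */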
217 typedef struct {
218 enum ea_type type; /* what kind of EA is this? */
219 int sib_present; /* is a SIB byte necessary? */
220 int bytes; /* # of bytes of offset needed */
221 int size; /* lazy - this is sib+bytes+1 */
222 uint8_t modrm, sib, rex, rip; /* the bytes themselves */
223 int8_t disp8; /* compressed displacement for EVEX */
224 } ea;
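/*
 * For orientation (illustrative values, not taken from a real run): an
 * effective address like [rbx+8] would typically come back from
 * process_ea() as type EA_SCALAR, sib_present = 0, bytes = 1 (one
 * displacement byte), a modrm byte with mod = 01 and rm = 011, and
 * size = 2 (ModRM + disp8).
 */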
226 #define GEN_SIB(scale, index, base) \
227 (((scale) << 6) | ((index) << 3) | ((base)))
229 #define GEN_MODRM(mod, reg, rm) \
230 (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
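/*
 * For example, GEN_MODRM(3, 2, 0) yields 0xD0 (mod = 11, reg = 010,
 * rm = 000), and GEN_SIB(2, 1, 4) yields 0x8C, a SIB byte selecting
 * index 001 (ecx/rcx) scaled by 4 with base field 100.
 */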
232 static int64_t calcsize(int32_t, int64_t, int, insn *,
233 const struct itemplate *);
234 static int emit_prefix(struct out_data *data, const int bits, insn *ins);
235 static void gencode(struct out_data *data, insn *ins);
236 static enum match_result find_match(const struct itemplate **tempp,
237 insn *instruction,
238 int32_t segment, int64_t offset, int bits);
239 static enum match_result matches(const struct itemplate *, insn *, int bits);
240 static opflags_t regflag(const operand *);
241 static int32_t regval(const operand *);
242 static int rexflags(int, opflags_t, int);
243 static int op_rexflags(const operand *, int);
244 static int op_evexflags(const operand *, int, uint8_t);
245 static void add_asp(insn *, int);
247 static enum ea_type process_ea(operand *, ea *, int, int,
248 opflags_t, insn *, const char **);
250 static inline bool absolute_op(const struct operand *o)
252 return o->segment == NO_SEG && o->wrt == NO_SEG &&
253 !(o->opflags & OPFLAG_RELATIVE);
256 static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
258 return ins->prefixes[pos] == prefix;
261 static void assert_no_prefix(insn * ins, enum prefix_pos pos)
263 if (ins->prefixes[pos])
264 nasm_error(ERR_NONFATAL, "invalid %s prefix",
265 prefix_name(ins->prefixes[pos]));
268 static const char *size_name(int size)
270 switch (size) {
271 case 1:
272 return "byte";
273 case 2:
274 return "word";
275 case 4:
276 return "dword";
277 case 8:
278 return "qword";
279 case 10:
280 return "tword";
281 case 16:
282 return "oword";
283 case 32:
284 return "yword";
285 case 64:
286 return "zword";
287 default:
288 return "???";
292 static void warn_overflow(int size)
294 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
295 "%s data exceeds bounds", size_name(size));
298 static void warn_overflow_const(int64_t data, int size)
300 if (overflow_general(data, size))
301 warn_overflow(size);
304 static void warn_overflow_opd(const struct operand *o, int size)
306 if (absolute_op(o)) {
307 if (overflow_general(o->offset, size))
308 warn_overflow(size);
312 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
314 bool err;
316 switch (sign) {
317 case OUT_WRAP:
318 err = overflow_general(data, size);
319 break;
320 case OUT_SIGNED:
321 err = overflow_signed(data, size);
322 break;
323 case OUT_UNSIGNED:
324 err = overflow_unsigned(data, size);
325 break;
326 default:
327 panic();
328 break;
331 if (err)
332 warn_overflow(size);
 336  * This routine wraps the real output format's output routine,
337 * in order to pass a copy of the data off to the listing file
338 * generator at the same time, flatten unnecessary relocations,
339 * and verify backend compatibility.
341 static void out(struct out_data *data)
343 static int32_t lineno = 0; /* static!!! */
344 static const char *lnfname = NULL;
345 int asize;
346 const int amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
347 union {
348 uint8_t b[8];
349 uint64_t q;
350 } xdata;
351 uint64_t size = data->size;
352 int64_t addrval;
353 int32_t fixseg; /* Segment for which to produce fixed data */
355 if (!data->size)
356 return; /* Nothing to do */
359 * Convert addresses to RAWDATA if possible
360 * XXX: not all backends want this for global symbols!!!!
362 switch (data->type) {
363 case OUT_ADDRESS:
364 addrval = data->toffset;
365 fixseg = NO_SEG; /* Absolute address is fixed data */
366 goto address;
368 case OUT_RELADDR:
369 addrval = data->toffset - data->relbase;
370 fixseg = data->segment; /* Our own segment is fixed data */
371 goto address;
373 address:
374 asize = data->size;
375 nasm_assert(asize <= 8);
376 if (data->tsegment == fixseg && data->twrt == NO_SEG) {
377 uint8_t *q = xdata.b;
379 warn_overflow_out(addrval, asize, data->sign);
381 WRITEADDR(q, addrval, asize);
382 data->data = xdata.b;
383 data->type = OUT_RAWDATA;
384 asize = 0; /* No longer an address */
386 break;
388 default:
389 asize = 0; /* Not an address */
390 break;
393 lfmt->output(data);
 396      * This call to src_get determines when we call the
 397      * debug-format-specific "linenum" function;
 398      * it updates lineno and lnfname to the current values,
 399      * returning 0 if "same as last time", -2 if lnfname
 400      * changed, and the amount by which lineno changed,
 401      * if it did. Thus, these variables must be static.
404 if (src_get(&lineno, &lnfname))
405 dfmt->linenum(lnfname, lineno, data->segment);
407 if (asize && asize > amax) {
408 if (data->type != OUT_ADDRESS || data->sign == OUT_SIGNED) {
409 nasm_error(ERR_NONFATAL,
410 "%d-bit signed relocation unsupported by output format %s\n",
411 asize << 3, ofmt->shortname);
412 } else {
413 nasm_error(ERR_WARNING | ERR_WARN_ZEXTRELOC,
414 "%d-bit unsigned relocation zero-extended from %d bits\n",
415 asize << 3, ofmt->maxbits);
416 data->size = amax;
417 ofmt->output(data);
418 data->insoffs += amax;
419 data->offset += amax;
420 data->size = size = asize - amax;
422 data->data = zero_buffer;
423 data->type = OUT_RAWDATA;
426 ofmt->output(data);
427 data->offset += size;
428 data->insoffs += size;
431 static inline void out_rawdata(struct out_data *data, const void *rawdata,
432 size_t size)
434 data->type = OUT_RAWDATA;
435 data->data = rawdata;
436 data->size = size;
437 out(data);
440 static void out_rawbyte(struct out_data *data, uint8_t byte)
442 data->type = OUT_RAWDATA;
443 data->data = &byte;
444 data->size = 1;
445 out(data);
448 static inline void out_reserve(struct out_data *data, uint64_t size)
450 data->type = OUT_RESERVE;
451 data->size = size;
452 out(data);
455 static inline void out_imm(struct out_data *data, const struct operand *opx,
456 int size, enum out_sign sign)
458 data->type =
459 (opx->opflags & OPFLAG_RELATIVE) ? OUT_RELADDR : OUT_ADDRESS;
460 data->sign = sign;
461 data->size = size;
462 data->toffset = opx->offset;
463 data->tsegment = opx->segment;
464 data->twrt = opx->wrt;
466 * XXX: improve this if at some point in the future we can
467 * distinguish the subtrahend in expressions like [foo - bar]
468 * where bar is a symbol in the current segment. However, at the
469 * current point, if OPFLAG_RELATIVE is set that subtraction has
470 * already occurred.
472 data->relbase = 0;
473 out(data);
476 static void out_reladdr(struct out_data *data, const struct operand *opx,
477 int size)
479 if (opx->opflags & OPFLAG_RELATIVE)
480 nasm_error(ERR_NONFATAL, "invalid use of self-relative expression");
482 data->type = OUT_RELADDR;
483 data->sign = OUT_SIGNED;
484 data->size = size;
485 data->toffset = opx->offset;
486 data->tsegment = opx->segment;
487 data->twrt = opx->wrt;
488 data->relbase = data->offset + (data->inslen - data->insoffs);
489 out(data);
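/*
 * The relbase computed above is the offset of the end of the instruction,
 * which is what x86 relative branches are measured from: e.g. for a
 * 5-byte "jmp rel32" starting at offset 0x100 the relbase is 0x105, so
 * the emitted displacement ends up as target - 0x105.
 */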
492 static inline void out_segment(struct out_data *data,
493 const struct operand *opx)
495 data->type = OUT_SEGMENT;
496 data->sign = OUT_UNSIGNED;
497 data->size = 2;
498 data->toffset = opx->offset;
499 data->tsegment = ofmt->segbase(opx->segment + 1);
500 data->twrt = opx->wrt;
501 out(data);
504 static bool jmp_match(int32_t segment, int64_t offset, int bits,
505 insn * ins, const struct itemplate *temp)
507 int64_t isize;
508 const uint8_t *code = temp->code;
509 uint8_t c = code[0];
510 bool is_byte;
512 if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
513 return false;
514 if (!optimizing)
515 return false;
516 if (optimizing < 0 && c == 0371)
517 return false;
519 isize = calcsize(segment, offset, bits, ins, temp);
521 if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
522 /* Be optimistic in pass 1 */
523 return true;
525 if (ins->oprs[0].segment != segment)
526 return false;
528 isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
529 is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */
531 if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
532 /* jmp short (opcode eb) cannot be used with bnd prefix. */
533 ins->prefixes[PPS_REP] = P_none;
534 nasm_error(ERR_WARNING | ERR_WARN_BND | ERR_PASS2 ,
535 "jmp short does not init bnd regs - bnd prefix dropped.");
538 return is_byte;
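/*
 * Worked example of the test above: a Jcc at offset 0x10 whose short form
 * is 2 bytes long and whose target is at 0x40 gives a delta of
 * 0x40 - 0x10 - 2 = 0x2E, which fits in a signed byte, so the \370
 * short-form template is accepted.
 */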
 541 /* This is totally just a wild guess at what is reasonable... */
542 #define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
544 int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
546 struct out_data data;
547 const struct itemplate *temp;
548 enum match_result m;
549 int32_t itimes;
550 int64_t wsize; /* size for DB etc. */
552 nasm_zero(data);
553 data.offset = start;
554 data.segment = segment;
555 data.itemp = NULL;
556 data.sign = OUT_WRAP;
557 data.bits = bits;
559 wsize = idata_bytes(instruction->opcode);
560 if (wsize == -1)
561 return 0;
563 if (wsize) {
564 extop *e;
565 int32_t t = instruction->times;
566 if (t < 0)
567 nasm_panic(0, "instruction->times < 0 (%"PRId32") in assemble()", t);
569 while (t--) { /* repeat TIMES times */
570 list_for_each(e, instruction->eops) {
571 if (e->type == EOT_DB_NUMBER) {
572 if (wsize > 8) {
573 nasm_error(ERR_NONFATAL,
574 "integer supplied to a DT, DO or DY"
575 " instruction");
576 } else {
577 data.insoffs = 0;
578 data.type = e->relative ? OUT_RELADDR : OUT_ADDRESS;
579 data.inslen = data.size = wsize;
580 data.toffset = e->offset;
581 data.tsegment = e->segment;
582 data.twrt = e->wrt;
583 data.relbase = 0;
584 out(&data);
586 } else if (e->type == EOT_DB_STRING ||
587 e->type == EOT_DB_STRING_FREE) {
588 int align = e->stringlen % wsize;
589 if (align)
590 align = wsize - align;
592 data.insoffs = 0;
593 data.inslen = e->stringlen + align;
595 out_rawdata(&data, e->stringval, e->stringlen);
596 out_rawdata(&data, zero_buffer, align);
599 if (t > 0 && t == instruction->times - 1) {
600 lfmt->set_offset(data.offset);
601 lfmt->uplevel(LIST_TIMES);
604 if (instruction->times > 1)
605 lfmt->downlevel(LIST_TIMES);
606 } else if (instruction->opcode == I_INCBIN) {
607 const char *fname = instruction->eops->stringval;
608 FILE *fp;
609 size_t t = instruction->times;
610 off_t base = 0;
611 off_t len;
612 const void *map = NULL;
613 char *buf = NULL;
614 size_t blk = 0; /* Buffered I/O block size */
615 size_t m = 0; /* Bytes last read */
617 fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
618 if (!fp) {
619 nasm_error(ERR_NONFATAL, "`incbin': unable to open file `%s'",
620 fname);
621 goto done;
624 len = nasm_file_size(fp);
626 if (len == (off_t)-1) {
627 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
628 fname);
629 goto close_done;
632 if (instruction->eops->next) {
633 base = instruction->eops->next->offset;
634 if (base >= len) {
635 len = 0;
636 } else {
637 len -= base;
638 if (instruction->eops->next->next &&
639 len > (off_t)instruction->eops->next->next->offset)
640 len = (off_t)instruction->eops->next->next->offset;
644 lfmt->set_offset(data.offset);
645 lfmt->uplevel(LIST_INCBIN);
647 if (!len)
648 goto end_incbin;
650 /* Try to map file data */
651 map = nasm_map_file(fp, base, len);
652 if (!map) {
653 blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
654 buf = nasm_malloc(blk);
657 while (t--) {
659 * Consider these irrelevant for INCBIN, since it is fully
660 * possible that these might be (way) bigger than an int
661 * can hold; there is, however, no reason to widen these
662 * types just for INCBIN. data.inslen == 0 signals to the
663 * backend that these fields are meaningless, if at all
664 * needed.
666 data.insoffs = 0;
667 data.inslen = 0;
669 if (map) {
670 out_rawdata(&data, map, len);
671 } else if ((off_t)m == len) {
672 out_rawdata(&data, buf, len);
673 } else {
674 off_t l = len;
676 if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
677 nasm_error(ERR_NONFATAL,
678 "`incbin': unable to seek on file `%s'",
679 fname);
680 goto end_incbin;
682 while (l > 0) {
683 m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
684 if (!m || feof(fp)) {
686 * This shouldn't happen unless the file
687 * actually changes while we are reading
688 * it.
690 nasm_error(ERR_NONFATAL,
691 "`incbin': unexpected EOF while"
692 " reading file `%s'", fname);
693 goto end_incbin;
695 out_rawdata(&data, buf, m);
696 l -= m;
700 end_incbin:
701 lfmt->downlevel(LIST_INCBIN);
702 if (instruction->times > 1) {
703 lfmt->set_offset(data.offset);
704 lfmt->uplevel(LIST_TIMES);
705 lfmt->downlevel(LIST_TIMES);
707 if (ferror(fp)) {
708 nasm_error(ERR_NONFATAL,
709 "`incbin': error while"
710 " reading file `%s'", fname);
712 close_done:
713 if (buf)
714 nasm_free(buf);
715 if (map)
716 nasm_unmap_file(map, len);
717 fclose(fp);
718 done:
720 } else {
721 /* "Real" instruction */
723 /* Check to see if we need an address-size prefix */
724 add_asp(instruction, bits);
726 m = find_match(&temp, instruction, data.segment, data.offset, bits);
728 if (m == MOK_GOOD) {
729 /* Matches! */
730 int64_t insn_size = calcsize(data.segment, data.offset,
731 bits, instruction, temp);
732 itimes = instruction->times;
733 if (insn_size < 0) /* shouldn't be, on pass two */
734 nasm_panic(0, "errors made it through from pass one");
736 data.itemp = temp;
737 data.bits = bits;
739 while (itimes--) {
740 data.insoffs = 0;
741 data.inslen = insn_size;
743 gencode(&data, instruction);
744 nasm_assert(data.insoffs == insn_size);
746 if (itimes > 0 && itimes == instruction->times - 1) {
747 lfmt->set_offset(data.offset);
748 lfmt->uplevel(LIST_TIMES);
751 if (instruction->times > 1)
752 lfmt->downlevel(LIST_TIMES);
753 } else {
754 /* No match */
755 switch (m) {
756 case MERR_OPSIZEMISSING:
757 nasm_error(ERR_NONFATAL, "operation size not specified");
758 break;
759 case MERR_OPSIZEMISMATCH:
760 nasm_error(ERR_NONFATAL, "mismatch in operand sizes");
761 break;
762 case MERR_BRNOTHERE:
763 nasm_error(ERR_NONFATAL,
764 "broadcast not permitted on this operand");
765 break;
766 case MERR_BRNUMMISMATCH:
767 nasm_error(ERR_NONFATAL,
768 "mismatch in the number of broadcasting elements");
769 break;
770 case MERR_MASKNOTHERE:
771 nasm_error(ERR_NONFATAL,
772 "mask not permitted on this operand");
773 break;
774 case MERR_BADCPU:
775 nasm_error(ERR_NONFATAL, "no instruction for this cpu level");
776 break;
777 case MERR_BADMODE:
778 nasm_error(ERR_NONFATAL, "instruction not supported in %d-bit mode",
779 bits);
780 break;
781 case MERR_ENCMISMATCH:
782 nasm_error(ERR_NONFATAL, "specific encoding scheme not available");
783 break;
784 case MERR_BADBND:
785 nasm_error(ERR_NONFATAL, "bnd prefix is not allowed");
786 break;
787 case MERR_BADREPNE:
788 nasm_error(ERR_NONFATAL, "%s prefix is not allowed",
789 (has_prefix(instruction, PPS_REP, P_REPNE) ?
790 "repne" : "repnz"));
791 break;
792 default:
793 nasm_error(ERR_NONFATAL,
794 "invalid combination of opcode and operands");
795 break;
799 return data.offset - start;
802 int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
804 const struct itemplate *temp;
805 enum match_result m;
807 if (instruction->opcode == I_none)
808 return 0;
810 if (instruction->opcode == I_DB || instruction->opcode == I_DW ||
811 instruction->opcode == I_DD || instruction->opcode == I_DQ ||
812 instruction->opcode == I_DT || instruction->opcode == I_DO ||
813 instruction->opcode == I_DY) {
814 extop *e;
815 int32_t isize, osize, wsize;
817 isize = 0;
818 wsize = idata_bytes(instruction->opcode);
820 list_for_each(e, instruction->eops) {
821 int32_t align;
823 osize = 0;
824 if (e->type == EOT_DB_NUMBER) {
825 osize = 1;
826 warn_overflow_const(e->offset, wsize);
827 } else if (e->type == EOT_DB_STRING ||
828 e->type == EOT_DB_STRING_FREE)
829 osize = e->stringlen;
831 align = (-osize) % wsize;
832 if (align < 0)
833 align += wsize;
834 isize += osize + align;
836 return isize;
839 if (instruction->opcode == I_INCBIN) {
840 const char *fname = instruction->eops->stringval;
841 off_t len;
843 len = nasm_file_size_by_path(fname);
844 if (len == (off_t)-1) {
845 nasm_error(ERR_NONFATAL, "`incbin': unable to get length of file `%s'",
846 fname);
847 return 0;
850 if (instruction->eops->next) {
851 if (len <= (off_t)instruction->eops->next->offset) {
852 len = 0;
853 } else {
854 len -= instruction->eops->next->offset;
855 if (instruction->eops->next->next &&
856 len > (off_t)instruction->eops->next->next->offset) {
857 len = (off_t)instruction->eops->next->next->offset;
862 return len;
865 /* Check to see if we need an address-size prefix */
866 add_asp(instruction, bits);
868 m = find_match(&temp, instruction, segment, offset, bits);
869 if (m == MOK_GOOD) {
870 /* we've matched an instruction. */
871 return calcsize(segment, offset, bits, instruction, temp);
872 } else {
873 return -1; /* didn't match any instruction */
877 static void bad_hle_warn(const insn * ins, uint8_t hleok)
879 enum prefixes rep_pfx = ins->prefixes[PPS_REP];
880 enum whatwarn { w_none, w_lock, w_inval } ww;
881 static const enum whatwarn warn[2][4] =
883 { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
884 { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
886 unsigned int n;
888 n = (unsigned int)rep_pfx - P_XACQUIRE;
889 if (n > 1)
890 return; /* Not XACQUIRE/XRELEASE */
892 ww = warn[n][hleok];
893 if (!is_class(MEMORY, ins->oprs[0].type))
894 ww = w_inval; /* HLE requires operand 0 to be memory */
896 switch (ww) {
897 case w_none:
898 break;
900 case w_lock:
901 if (ins->prefixes[PPS_LOCK] != P_LOCK) {
902 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
903 "%s with this instruction requires lock",
904 prefix_name(rep_pfx));
906 break;
908 case w_inval:
909 nasm_error(ERR_WARNING | ERR_WARN_HLE | ERR_PASS2,
910 "%s invalid with this instruction",
911 prefix_name(rep_pfx));
912 break;
916 /* Common construct */
917 #define case3(x) case (x): case (x)+1: case (x)+2
918 #define case4(x) case3(x): case (x)+3
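/* E.g. case4(010) expands to: case 010: case 011: case 012: case 013 */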
920 static int64_t calcsize(int32_t segment, int64_t offset, int bits,
921 insn * ins, const struct itemplate *temp)
923 const uint8_t *codes = temp->code;
924 int64_t length = 0;
925 uint8_t c;
926 int rex_mask = ~0;
927 int op1, op2;
928 struct operand *opx;
929 uint8_t opex = 0;
930 enum ea_type eat;
931 uint8_t hleok = 0;
932 bool lockcheck = true;
933 enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
934 const char *errmsg;
936 ins->rex = 0; /* Ensure REX is reset */
937 eat = EA_SCALAR; /* Expect a scalar EA */
938 memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */
940 if (ins->prefixes[PPS_OSIZE] == P_O64)
941 ins->rex |= REX_W;
943 (void)segment; /* Don't warn that this parameter is unused */
944 (void)offset; /* Don't warn that this parameter is unused */
946 while (*codes) {
947 c = *codes++;
948 op1 = (c & 3) + ((opex & 1) << 2);
949 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
950 opx = &ins->oprs[op1];
951 opex = 0; /* For the next iteration */
953 switch (c) {
954 case4(01):
955 codes += c, length += c;
956 break;
958 case3(05):
959 opex = c;
960 break;
962 case4(010):
963 ins->rex |=
964 op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
965 codes++, length++;
966 break;
968 case4(014):
969 /* this is an index reg of MIB operand */
970 mib_index = opx->basereg;
971 break;
973 case4(020):
974 case4(024):
975 length++;
976 break;
978 case4(030):
979 length += 2;
980 break;
982 case4(034):
983 if (opx->type & (BITS16 | BITS32 | BITS64))
984 length += (opx->type & BITS16) ? 2 : 4;
985 else
986 length += (bits == 16) ? 2 : 4;
987 break;
989 case4(040):
990 length += 4;
991 break;
993 case4(044):
994 length += ins->addr_size >> 3;
995 break;
997 case4(050):
998 length++;
999 break;
1001 case4(054):
1002 length += 8; /* MOV reg64/imm */
1003 break;
1005 case4(060):
1006 length += 2;
1007 break;
1009 case4(064):
1010 if (opx->type & (BITS16 | BITS32 | BITS64))
1011 length += (opx->type & BITS16) ? 2 : 4;
1012 else
1013 length += (bits == 16) ? 2 : 4;
1014 break;
1016 case4(070):
1017 length += 4;
1018 break;
1020 case4(074):
1021 length += 2;
1022 break;
1024 case 0172:
1025 case 0173:
1026 codes++;
1027 length++;
1028 break;
1030 case4(0174):
1031 length++;
1032 break;
1034 case4(0240):
1035 ins->rex |= REX_EV;
1036 ins->vexreg = regval(opx);
1037 ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
1038 ins->vex_cm = *codes++;
1039 ins->vex_wlp = *codes++;
1040 ins->evex_tuple = (*codes++ - 0300);
1041 break;
1043 case 0250:
1044 ins->rex |= REX_EV;
1045 ins->vexreg = 0;
1046 ins->vex_cm = *codes++;
1047 ins->vex_wlp = *codes++;
1048 ins->evex_tuple = (*codes++ - 0300);
1049 break;
1051 case4(0254):
1052 length += 4;
1053 break;
1055 case4(0260):
1056 ins->rex |= REX_V;
1057 ins->vexreg = regval(opx);
1058 ins->vex_cm = *codes++;
1059 ins->vex_wlp = *codes++;
1060 break;
1062 case 0270:
1063 ins->rex |= REX_V;
1064 ins->vexreg = 0;
1065 ins->vex_cm = *codes++;
1066 ins->vex_wlp = *codes++;
1067 break;
1069 case3(0271):
1070 hleok = c & 3;
1071 break;
1073 case4(0274):
1074 length++;
1075 break;
1077 case4(0300):
1078 break;
1080 case 0310:
1081 if (bits == 64)
1082 return -1;
1083 length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
1084 break;
1086 case 0311:
1087 length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
1088 break;
1090 case 0312:
1091 break;
1093 case 0313:
1094 if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
1095 has_prefix(ins, PPS_ASIZE, P_A32))
1096 return -1;
1097 break;
1099 case4(0314):
1100 break;
1102 case 0320:
1104 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1105 if (pfx == P_O16)
1106 break;
1107 if (pfx != P_none)
1108 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1109 else
1110 ins->prefixes[PPS_OSIZE] = P_O16;
1111 break;
1114 case 0321:
1116 enum prefixes pfx = ins->prefixes[PPS_OSIZE];
1117 if (pfx == P_O32)
1118 break;
1119 if (pfx != P_none)
1120 nasm_error(ERR_WARNING | ERR_PASS2, "invalid operand size prefix");
1121 else
1122 ins->prefixes[PPS_OSIZE] = P_O32;
1123 break;
1126 case 0322:
1127 break;
1129 case 0323:
1130 rex_mask &= ~REX_W;
1131 break;
1133 case 0324:
1134 ins->rex |= REX_W;
1135 break;
1137 case 0325:
1138 ins->rex |= REX_NH;
1139 break;
1141 case 0326:
1142 break;
1144 case 0330:
1145 codes++, length++;
1146 break;
1148 case 0331:
1149 break;
1151 case 0332:
1152 case 0333:
1153 length++;
1154 break;
1156 case 0334:
1157 ins->rex |= REX_L;
1158 break;
1160 case 0335:
1161 break;
1163 case 0336:
1164 if (!ins->prefixes[PPS_REP])
1165 ins->prefixes[PPS_REP] = P_REP;
1166 break;
1168 case 0337:
1169 if (!ins->prefixes[PPS_REP])
1170 ins->prefixes[PPS_REP] = P_REPNE;
1171 break;
1173 case 0340:
1174 if (!absolute_op(&ins->oprs[0]))
1175 nasm_error(ERR_NONFATAL, "attempt to reserve non-constant"
1176 " quantity of BSS space");
1177 else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
1178 nasm_error(ERR_WARNING | ERR_PASS1,
1179 "forward reference in RESx can have unpredictable results");
1180 else
1181 length += ins->oprs[0].offset;
1182 break;
1184 case 0341:
1185 if (!ins->prefixes[PPS_WAIT])
1186 ins->prefixes[PPS_WAIT] = P_WAIT;
1187 break;
1189 case 0360:
1190 break;
1192 case 0361:
1193 length++;
1194 break;
1196 case 0364:
1197 case 0365:
1198 break;
1200 case 0366:
1201 case 0367:
1202 length++;
1203 break;
1205 case 0370:
1206 case 0371:
1207 break;
1209 case 0373:
1210 length++;
1211 break;
1213 case 0374:
1214 eat = EA_XMMVSIB;
1215 break;
1217 case 0375:
1218 eat = EA_YMMVSIB;
1219 break;
1221 case 0376:
1222 eat = EA_ZMMVSIB;
1223 break;
1225 case4(0100):
1226 case4(0110):
1227 case4(0120):
1228 case4(0130):
1229 case4(0200):
1230 case4(0204):
1231 case4(0210):
1232 case4(0214):
1233 case4(0220):
1234 case4(0224):
1235 case4(0230):
1236 case4(0234):
1238 ea ea_data;
1239 int rfield;
1240 opflags_t rflags;
1241 struct operand *opy = &ins->oprs[op2];
1242 struct operand *op_er_sae;
1244 ea_data.rex = 0; /* Ensure ea.REX is initially 0 */
1246 if (c <= 0177) {
1247 /* pick rfield from operand b (opx) */
1248 rflags = regflag(opx);
1249 rfield = nasm_regvals[opx->basereg];
1250 } else {
1251 rflags = 0;
1252 rfield = c & 7;
1255 /* EVEX.b1 : evex_brerop contains the operand position */
1256 op_er_sae = (ins->evex_brerop >= 0 ?
1257 &ins->oprs[ins->evex_brerop] : NULL);
1259 if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
1260 /* set EVEX.b */
1261 ins->evex_p[2] |= EVEX_P2B;
1262 if (op_er_sae->decoflags & ER) {
1263 /* set EVEX.RC (rounding control) */
1264 ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
1265 & EVEX_P2RC;
1267 } else {
1268 /* set EVEX.L'L (vector length) */
1269 ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
1270 ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
1271 if (opy->decoflags & BRDCAST_MASK) {
1272 /* set EVEX.b */
1273 ins->evex_p[2] |= EVEX_P2B;
1277 if (itemp_has(temp, IF_MIB)) {
1278 opy->eaflags |= EAF_MIB;
 1280              * If a separate form of MIB (ICC style) is used,
 1281              * the index register info is merged into the memory operand.
1283 if (mib_index != R_none) {
1284 opy->indexreg = mib_index;
1285 opy->scale = 1;
1286 opy->hintbase = mib_index;
1287 opy->hinttype = EAH_NOTBASE;
1291 if (process_ea(opy, &ea_data, bits,
1292 rfield, rflags, ins, &errmsg) != eat) {
1293 nasm_error(ERR_NONFATAL, "%s", errmsg);
1294 return -1;
1295 } else {
1296 ins->rex |= ea_data.rex;
1297 length += ea_data.size;
1300 break;
1302 default:
1303 nasm_panic(0, "internal instruction table corrupt"
1304 ": instruction code \\%o (0x%02X) given", c, c);
1305 break;
1309 ins->rex &= rex_mask;
1311 if (ins->rex & REX_NH) {
1312 if (ins->rex & REX_H) {
1313 nasm_error(ERR_NONFATAL, "instruction cannot use high registers");
1314 return -1;
1316 ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
1319 switch (ins->prefixes[PPS_VEX]) {
1320 case P_EVEX:
1321 if (!(ins->rex & REX_EV))
1322 return -1;
1323 break;
1324 case P_VEX3:
1325 case P_VEX2:
1326 if (!(ins->rex & REX_V))
1327 return -1;
1328 break;
1329 default:
1330 break;
1333 if (ins->rex & (REX_V | REX_EV)) {
1334 int bad32 = REX_R|REX_W|REX_X|REX_B;
1336 if (ins->rex & REX_H) {
1337 nasm_error(ERR_NONFATAL, "cannot use high register in AVX instruction");
1338 return -1;
1340 switch (ins->vex_wlp & 060) {
1341 case 000:
1342 case 040:
1343 ins->rex &= ~REX_W;
1344 break;
1345 case 020:
1346 ins->rex |= REX_W;
1347 bad32 &= ~REX_W;
1348 break;
1349 case 060:
1350 /* Follow REX_W */
1351 break;
1354 if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
1355 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1356 return -1;
1357 } else if (!(ins->rex & REX_EV) &&
1358 ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
1359 nasm_error(ERR_NONFATAL, "invalid high-16 register in non-AVX-512");
1360 return -1;
1362 if (ins->rex & REX_EV)
1363 length += 4;
1364 else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1365 ins->prefixes[PPS_VEX] == P_VEX3)
1366 length += 3;
1367 else
1368 length += 2;
1369 } else if (ins->rex & REX_MASK) {
1370 if (ins->rex & REX_H) {
1371 nasm_error(ERR_NONFATAL, "cannot use high register in rex instruction");
1372 return -1;
1373 } else if (bits == 64) {
1374 length++;
1375 } else if ((ins->rex & REX_L) &&
1376 !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
1377 iflag_ffs(&cpu) >= IF_X86_64) {
1378 /* LOCK-as-REX.R */
1379 assert_no_prefix(ins, PPS_LOCK);
1380 lockcheck = false; /* Already errored, no need for warning */
1381 length++;
1382 } else {
1383 nasm_error(ERR_NONFATAL, "invalid operands in non-64-bit mode");
1384 return -1;
1388 if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
1389 (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
1390 nasm_error(ERR_WARNING | ERR_WARN_LOCK | ERR_PASS2 ,
1391 "instruction is not lockable");
1394 bad_hle_warn(ins, hleok);
1397 * when BND prefix is set by DEFAULT directive,
1398 * BND prefix is added to every appropriate instruction line
1399 * unless it is overridden by NOBND prefix.
1401 if (globalbnd &&
1402 (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
1403 ins->prefixes[PPS_REP] = P_BND;
1406 * Add length of legacy prefixes
1408 length += emit_prefix(NULL, bits, ins);
1410 return length;
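/*
 * The REX prefix, when one is needed, is the single byte 0100WRXB:
 * emit_rex() masks the accumulated ins->rex flags with REX_MASK and ORs
 * in REX_P, so (assuming the usual flag values) a plain 64-bit register
 * operation comes out as 0x48, i.e. REX.W alone.
 */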
1413 static inline void emit_rex(struct out_data *data, insn *ins)
1415 if (data->bits == 64) {
1416 if ((ins->rex & REX_MASK) &&
1417 !(ins->rex & (REX_V | REX_EV)) &&
1418 !ins->rex_done) {
1419 uint8_t rex = (ins->rex & REX_MASK) | REX_P;
1420 out_rawbyte(data, rex);
1421 ins->rex_done = true;
1426 static int emit_prefix(struct out_data *data, const int bits, insn *ins)
1428 int bytes = 0;
1429 int j;
1431 for (j = 0; j < MAXPREFIX; j++) {
1432 uint8_t c = 0;
1433 switch (ins->prefixes[j]) {
1434 case P_WAIT:
1435 c = 0x9B;
1436 break;
1437 case P_LOCK:
1438 c = 0xF0;
1439 break;
1440 case P_REPNE:
1441 case P_REPNZ:
1442 case P_XACQUIRE:
1443 case P_BND:
1444 c = 0xF2;
1445 break;
1446 case P_REPE:
1447 case P_REPZ:
1448 case P_REP:
1449 case P_XRELEASE:
1450 c = 0xF3;
1451 break;
1452 case R_CS:
1453 if (bits == 64) {
1454 nasm_error(ERR_WARNING | ERR_PASS2,
1455 "cs segment base generated, but will be ignored in 64-bit mode");
1457 c = 0x2E;
1458 break;
1459 case R_DS:
1460 if (bits == 64) {
1461 nasm_error(ERR_WARNING | ERR_PASS2,
1462 "ds segment base generated, but will be ignored in 64-bit mode");
1464 c = 0x3E;
1465 break;
1466 case R_ES:
1467 if (bits == 64) {
1468 nasm_error(ERR_WARNING | ERR_PASS2,
1469 "es segment base generated, but will be ignored in 64-bit mode");
1471 c = 0x26;
1472 break;
1473 case R_FS:
1474 c = 0x64;
1475 break;
1476 case R_GS:
1477 c = 0x65;
1478 break;
1479 case R_SS:
1480 if (bits == 64) {
1481 nasm_error(ERR_WARNING | ERR_PASS2,
1482 "ss segment base generated, but will be ignored in 64-bit mode");
1484 c = 0x36;
1485 break;
1486 case R_SEGR6:
1487 case R_SEGR7:
1488 nasm_error(ERR_NONFATAL,
1489 "segr6 and segr7 cannot be used as prefixes");
1490 break;
1491 case P_A16:
1492 if (bits == 64) {
1493 nasm_error(ERR_NONFATAL,
1494 "16-bit addressing is not supported "
1495 "in 64-bit mode");
1496 } else if (bits != 16)
1497 c = 0x67;
1498 break;
1499 case P_A32:
1500 if (bits != 32)
1501 c = 0x67;
1502 break;
1503 case P_A64:
1504 if (bits != 64) {
1505 nasm_error(ERR_NONFATAL,
1506 "64-bit addressing is only supported "
1507 "in 64-bit mode");
1509 break;
1510 case P_ASP:
1511 c = 0x67;
1512 break;
1513 case P_O16:
1514 if (bits != 16)
1515 c = 0x66;
1516 break;
1517 case P_O32:
1518 if (bits == 16)
1519 c = 0x66;
1520 break;
1521 case P_O64:
1522 /* REX.W */
1523 break;
1524 case P_OSP:
1525 c = 0x66;
1526 break;
1527 case P_EVEX:
1528 case P_VEX3:
1529 case P_VEX2:
1530 case P_NOBND:
1531 case P_none:
1532 break;
1533 default:
1534 nasm_panic(0, "invalid instruction prefix");
1536 if (c) {
1537 if (data)
1538 out_rawbyte(data, c);
1539 bytes++;
1542 return bytes;
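/*
 * Note that emit_prefix() is used in two modes: gencode() passes a real
 * out_data so the prefix bytes are actually emitted, while calcsize()
 * passes data == NULL and only uses the returned byte count to size the
 * instruction.
 */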
1545 static void gencode(struct out_data *data, insn *ins)
1547 uint8_t c;
1548 uint8_t bytes[4];
1549 int64_t size;
1550 int op1, op2;
1551 struct operand *opx;
1552 const uint8_t *codes = data->itemp->code;
1553 uint8_t opex = 0;
1554 enum ea_type eat = EA_SCALAR;
1555 int r;
1556 const int bits = data->bits;
1557 const char *errmsg;
1559 ins->rex_done = false;
1561 emit_prefix(data, bits, ins);
1563 while (*codes) {
1564 c = *codes++;
1565 op1 = (c & 3) + ((opex & 1) << 2);
1566 op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
1567 opx = &ins->oprs[op1];
1568 opex = 0; /* For the next iteration */
1571 switch (c) {
1572 case 01:
1573 case 02:
1574 case 03:
1575 case 04:
1576 emit_rex(data, ins);
1577 out_rawdata(data, codes, c);
1578 codes += c;
1579 break;
1581 case 05:
1582 case 06:
1583 case 07:
1584 opex = c;
1585 break;
1587 case4(010):
1588 emit_rex(data, ins);
1589 out_rawbyte(data, *codes++ + (regval(opx) & 7));
1590 break;
1592 case4(014):
1593 break;
1595 case4(020):
1596 if (opx->offset < -256 || opx->offset > 255)
1597 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1598 "byte value exceeds bounds");
1599 out_imm(data, opx, 1, OUT_WRAP);
1600 break;
1602 case4(024):
1603 if (opx->offset < 0 || opx->offset > 255)
1604 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1605 "unsigned byte value exceeds bounds");
1606 out_imm(data, opx, 1, OUT_UNSIGNED);
1607 break;
1609 case4(030):
1610 warn_overflow_opd(opx, 2);
1611 out_imm(data, opx, 2, OUT_WRAP);
1612 break;
1614 case4(034):
1615 if (opx->type & (BITS16 | BITS32))
1616 size = (opx->type & BITS16) ? 2 : 4;
1617 else
1618 size = (bits == 16) ? 2 : 4;
1619 warn_overflow_opd(opx, size);
1620 out_imm(data, opx, size, OUT_WRAP);
1621 break;
1623 case4(040):
1624 warn_overflow_opd(opx, 4);
1625 out_imm(data, opx, 4, OUT_WRAP);
1626 break;
1628 case4(044):
1629 size = ins->addr_size >> 3;
1630 warn_overflow_opd(opx, size);
1631 out_imm(data, opx, size, OUT_WRAP);
1632 break;
1634 case4(050):
1635 if (opx->segment == data->segment) {
1636 int64_t delta = opx->offset - data->offset
1637 - (data->inslen - data->insoffs);
1638 if (delta > 127 || delta < -128)
1639 nasm_error(ERR_NONFATAL, "short jump is out of range");
1641 out_reladdr(data, opx, 1);
1642 break;
1644 case4(054):
1645 out_imm(data, opx, 8, OUT_WRAP);
1646 break;
1648 case4(060):
1649 out_reladdr(data, opx, 2);
1650 break;
1652 case4(064):
1653 if (opx->type & (BITS16 | BITS32 | BITS64))
1654 size = (opx->type & BITS16) ? 2 : 4;
1655 else
1656 size = (bits == 16) ? 2 : 4;
1658 out_reladdr(data, opx, size);
1659 break;
1661 case4(070):
1662 out_reladdr(data, opx, 4);
1663 break;
1665 case4(074):
1666 if (opx->segment == NO_SEG)
1667 nasm_error(ERR_NONFATAL, "value referenced by FAR is not"
1668 " relocatable");
1669 out_segment(data, opx);
1670 break;
1672 case 0172:
1674 int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
1675 const struct operand *opy;
1677 c = *codes++;
1678 opx = &ins->oprs[c >> 3];
1679 opy = &ins->oprs[c & 7];
1680 if (!absolute_op(opy)) {
1681 nasm_error(ERR_NONFATAL,
1682 "non-absolute expression not permitted as argument %d",
1683 c & 7);
1684 } else if (opy->offset & ~mask) {
1685 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1686 "is4 argument exceeds bounds");
1688 c = opy->offset & mask;
1689 goto emit_is4;
1692 case 0173:
1693 c = *codes++;
1694 opx = &ins->oprs[c >> 4];
1695 c &= 15;
1696 goto emit_is4;
1698 case4(0174):
1699 c = 0;
1700 emit_is4:
1701 r = nasm_regvals[opx->basereg];
1702 out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
1703 break;
1705 case4(0254):
1706 if (absolute_op(opx) &&
1707 (int32_t)opx->offset != (int64_t)opx->offset) {
1708 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1709 "signed dword immediate exceeds bounds");
1711 out_imm(data, opx, 4, OUT_SIGNED);
1712 break;
1714 case4(0240):
1715 case 0250:
1716 codes += 3;
1717 ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
1718 EVEX_P2Z | EVEX_P2AAA, 2);
1719 ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
1720 bytes[0] = 0x62;
1721 /* EVEX.X can be set by either REX or EVEX for different reasons */
1722 bytes[1] = ((((ins->rex & 7) << 5) |
1723 (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
1724 (ins->vex_cm & EVEX_P0MM);
1725 bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
1726 ((~ins->vexreg & 15) << 3) |
1727 (1 << 2) | (ins->vex_wlp & 3);
1728 bytes[3] = ins->evex_p[2];
1729 out_rawdata(data, bytes, 4);
1730 break;
1732 case4(0260):
1733 case 0270:
1734 codes += 2;
1735 if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
1736 ins->prefixes[PPS_VEX] == P_VEX3) {
1737 bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
1738 bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
1739 bytes[2] = ((ins->rex & REX_W) << (7-3)) |
1740 ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
1741 out_rawdata(data, bytes, 3);
1742 } else {
1743 bytes[0] = 0xc5;
1744 bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
1745 ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
1746 out_rawdata(data, bytes, 2);
1748 break;
1750 case 0271:
1751 case 0272:
1752 case 0273:
1753 break;
1755 case4(0274):
1757 uint64_t uv, um;
1758 int s;
1760 if (absolute_op(opx)) {
1761 if (ins->rex & REX_W)
1762 s = 64;
1763 else if (ins->prefixes[PPS_OSIZE] == P_O16)
1764 s = 16;
1765 else if (ins->prefixes[PPS_OSIZE] == P_O32)
1766 s = 32;
1767 else
1768 s = bits;
1770 um = (uint64_t)2 << (s-1);
1771 uv = opx->offset;
1773 if (uv > 127 && uv < (uint64_t)-128 &&
1774 (uv < um-128 || uv > um-1)) {
1775 /* If this wasn't explicitly byte-sized, warn as though we
1776 * had fallen through to the imm16/32/64 case.
1778 nasm_error(ERR_WARNING | ERR_PASS2 | ERR_WARN_NOV,
1779 "%s value exceeds bounds",
1780 (opx->type & BITS8) ? "signed byte" :
1781 s == 16 ? "word" :
1782 s == 32 ? "dword" :
1783 "signed dword");
1786 /* Output as a raw byte to avoid byte overflow check */
1787 out_rawbyte(data, (uint8_t)uv);
1788 } else {
1789 out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
1791 break;
1794 case4(0300):
1795 break;
1797 case 0310:
1798 if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
1799 out_rawbyte(data, 0x67);
1800 break;
1802 case 0311:
1803 if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
1804 out_rawbyte(data, 0x67);
1805 break;
1807 case 0312:
1808 break;
1810 case 0313:
1811 ins->rex = 0;
1812 break;
1814 case4(0314):
1815 break;
1817 case 0320:
1818 case 0321:
1819 break;
1821 case 0322:
1822 case 0323:
1823 break;
1825 case 0324:
1826 ins->rex |= REX_W;
1827 break;
1829 case 0325:
1830 break;
1832 case 0326:
1833 break;
1835 case 0330:
1836 out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
1837 break;
1839 case 0331:
1840 break;
1842 case 0332:
1843 case 0333:
1844 out_rawbyte(data, c - 0332 + 0xF2);
1845 break;
1847 case 0334:
1848 if (ins->rex & REX_R)
1849 out_rawbyte(data, 0xF0);
1850 ins->rex &= ~(REX_L|REX_R);
1851 break;
1853 case 0335:
1854 break;
1856 case 0336:
1857 case 0337:
1858 break;
1860 case 0340:
1861 if (ins->oprs[0].segment != NO_SEG)
1862 nasm_panic(0, "non-constant BSS size in pass two");
1864 out_reserve(data, ins->oprs[0].offset);
1865 break;
1867 case 0341:
1868 break;
1870 case 0360:
1871 break;
1873 case 0361:
1874 out_rawbyte(data, 0x66);
1875 break;
1877 case 0364:
1878 case 0365:
1879 break;
1881 case 0366:
1882 case 0367:
1883 out_rawbyte(data, c - 0366 + 0x66);
1884 break;
1886 case3(0370):
1887 break;
1889 case 0373:
1890 out_rawbyte(data, bits == 16 ? 3 : 5);
1891 break;
1893 case 0374:
1894 eat = EA_XMMVSIB;
1895 break;
1897 case 0375:
1898 eat = EA_YMMVSIB;
1899 break;
1901 case 0376:
1902 eat = EA_ZMMVSIB;
1903 break;
1905 case4(0100):
1906 case4(0110):
1907 case4(0120):
1908 case4(0130):
1909 case4(0200):
1910 case4(0204):
1911 case4(0210):
1912 case4(0214):
1913 case4(0220):
1914 case4(0224):
1915 case4(0230):
1916 case4(0234):
1918 ea ea_data;
1919 int rfield;
1920 opflags_t rflags;
1921 uint8_t *p;
1922 struct operand *opy = &ins->oprs[op2];
1924 if (c <= 0177) {
1925 /* pick rfield from operand b (opx) */
1926 rflags = regflag(opx);
1927 rfield = nasm_regvals[opx->basereg];
1928 } else {
1929 /* rfield is constant */
1930 rflags = 0;
1931 rfield = c & 7;
1934 if (process_ea(opy, &ea_data, bits,
1935 rfield, rflags, ins, &errmsg) != eat)
1936 nasm_error(ERR_NONFATAL, "%s", errmsg);
1938 p = bytes;
1939 *p++ = ea_data.modrm;
1940 if (ea_data.sib_present)
1941 *p++ = ea_data.sib;
1942 out_rawdata(data, bytes, p - bytes);
1945 * Make sure the address gets the right offset in case
1946 * the line breaks in the .lst file (BR 1197827)
1949 if (ea_data.bytes) {
1950 /* use compressed displacement, if available */
1951 if (ea_data.disp8) {
1952 out_rawbyte(data, ea_data.disp8);
1953 } else if (ea_data.rip) {
1954 out_reladdr(data, opy, ea_data.bytes);
1955 } else {
1956 int asize = ins->addr_size >> 3;
1958 if (overflow_general(opy->offset, asize) ||
1959 signed_bits(opy->offset, ins->addr_size) !=
1960 signed_bits(opy->offset, ea_data.bytes << 3))
1961 warn_overflow(ea_data.bytes);
1963 out_imm(data, opy, ea_data.bytes,
1964 (asize > ea_data.bytes)
1965 ? OUT_SIGNED : OUT_WRAP);
1969 break;
1971 default:
1972 nasm_panic(0, "internal instruction table corrupt"
1973 ": instruction code \\%o (0x%02X) given", c, c);
1974 break;
1979 static opflags_t regflag(const operand * o)
1981 if (!is_register(o->basereg))
1982 nasm_panic(0, "invalid operand passed to regflag()");
1983 return nasm_reg_flags[o->basereg];
1986 static int32_t regval(const operand * o)
1988 if (!is_register(o->basereg))
1989 nasm_panic(0, "invalid operand passed to regval()");
1990 return nasm_regvals[o->basereg];
1993 static int op_rexflags(const operand * o, int mask)
1995 opflags_t flags;
1996 int val;
1998 if (!is_register(o->basereg))
1999 nasm_panic(0, "invalid operand passed to op_rexflags()");
2001 flags = nasm_reg_flags[o->basereg];
2002 val = nasm_regvals[o->basereg];
2004 return rexflags(val, flags, mask);
2007 static int rexflags(int val, opflags_t flags, int mask)
2009 int rex = 0;
2011 if (val >= 0 && (val & 8))
2012 rex |= REX_B|REX_X|REX_R;
2013 if (flags & BITS64)
2014 rex |= REX_W;
2015 if (!(REG_HIGH & ~flags)) /* AH, CH, DH, BH */
2016 rex |= REX_H;
2017 else if (!(REG8 & ~flags) && val >= 4) /* SPL, BPL, SIL, DIL */
2018 rex |= REX_P;
2020 return rex & mask;
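/*
 * Example: for r9 (register value 9, a 64-bit register), rexflags()
 * computes REX_B|REX_X|REX_R|REX_W before masking; the caller's mask then
 * selects the positional bit it actually needs (REX_B for a base
 * register, REX_X for an index, REX_R for the reg field).
 */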
2023 static int evexflags(int val, decoflags_t deco,
2024 int mask, uint8_t byte)
2026 int evex = 0;
2028 switch (byte) {
2029 case 0:
2030 if (val >= 0 && (val & 16))
2031 evex |= (EVEX_P0RP | EVEX_P0X);
2032 break;
2033 case 2:
2034 if (val >= 0 && (val & 16))
2035 evex |= EVEX_P2VP;
2036 if (deco & Z)
2037 evex |= EVEX_P2Z;
2038 if (deco & OPMASK_MASK)
2039 evex |= deco & EVEX_P2AAA;
2040 break;
2042 return evex & mask;
2045 static int op_evexflags(const operand * o, int mask, uint8_t byte)
2047 int val;
2049 val = nasm_regvals[o->basereg];
2051 return evexflags(val, o->decoflags, mask, byte);
2054 static enum match_result find_match(const struct itemplate **tempp,
2055 insn *instruction,
2056 int32_t segment, int64_t offset, int bits)
2058 const struct itemplate *temp;
2059 enum match_result m, merr;
2060 opflags_t xsizeflags[MAX_OPERANDS];
2061 bool opsizemissing = false;
2062 int8_t broadcast = instruction->evex_brerop;
2063 int i;
2065 /* broadcasting uses a different data element size */
2066 for (i = 0; i < instruction->operands; i++)
2067 if (i == broadcast)
2068 xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
2069 else
2070 xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;
2072 merr = MERR_INVALOP;
2074 for (temp = nasm_instructions[instruction->opcode];
2075 temp->opcode != I_none; temp++) {
2076 m = matches(temp, instruction, bits);
2077 if (m == MOK_JUMP) {
2078 if (jmp_match(segment, offset, bits, instruction, temp))
2079 m = MOK_GOOD;
2080 else
2081 m = MERR_INVALOP;
2082 } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
2084 * Missing operand size and a candidate for fuzzy matching...
2086 for (i = 0; i < temp->operands; i++)
2087 if (i == broadcast)
2088 xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
2089 else
2090 xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
2091 opsizemissing = true;
2093 if (m > merr)
2094 merr = m;
2095 if (merr == MOK_GOOD)
2096 goto done;
2099 /* No match, but see if we can get a fuzzy operand size match... */
2100 if (!opsizemissing)
2101 goto done;
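/*
 * Illustrative case: for "mov [mem], 1" every size-specific template
 * fails with MERR_OPSIZEMISSING and xsizeflags[0] accumulates more than
 * one size bit, so the loop below bails out and the caller reports
 * "operation size not specified"; had only a single size been possible,
 * it would be filled in here and matching retried.
 */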
2103 for (i = 0; i < instruction->operands; i++) {
2105 * We ignore extrinsic operand sizes on registers, so we should
2106 * never try to fuzzy-match on them. This also resolves the case
2107 * when we have e.g. "xmmrm128" in two different positions.
2109 if (is_class(REGISTER, instruction->oprs[i].type))
2110 continue;
2112 /* This tests if xsizeflags[i] has more than one bit set */
2113 if ((xsizeflags[i] & (xsizeflags[i]-1)))
2114 goto done; /* No luck */
2116 if (i == broadcast) {
2117 instruction->oprs[i].decoflags |= xsizeflags[i];
2118 instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
2119 BITS32 : BITS64);
2120 } else {
2121 instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
2125 /* Try matching again... */
2126 for (temp = nasm_instructions[instruction->opcode];
2127 temp->opcode != I_none; temp++) {
2128 m = matches(temp, instruction, bits);
2129 if (m == MOK_JUMP) {
2130 if (jmp_match(segment, offset, bits, instruction, temp))
2131 m = MOK_GOOD;
2132 else
2133 m = MERR_INVALOP;
2135 if (m > merr)
2136 merr = m;
2137 if (merr == MOK_GOOD)
2138 goto done;
2141 done:
2142 *tempp = temp;
2143 return merr;
2146 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2148 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
2149 uint8_t brcast_num;
2151 if (brsize > BITS64)
2152 nasm_error(ERR_FATAL,
2153 "size of broadcasting element is greater than 64 bits");
2156 * The shift term is to take care of the extra BITS80 inserted
2157 * between BITS64 and BITS128.
2159 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
2160 >> (opsize > (BITS64 >> SIZE_SHIFT));
2162 return brcast_num;
2165 static enum match_result matches(const struct itemplate *itemp,
2166 insn *instruction, int bits)
2168 opflags_t size[MAX_OPERANDS], asize;
2169 bool opsizemissing = false;
2170 int i, oprs;
2173 * Check the opcode
2175 if (itemp->opcode != instruction->opcode)
2176 return MERR_INVALOP;
2179 * Count the operands
2181 if (itemp->operands != instruction->operands)
2182 return MERR_INVALOP;
2185 * Is it legal?
2187 if (!(optimizing > 0) && itemp_has(itemp, IF_OPT))
2188 return MERR_INVALOP;
2191 * {evex} available?
2193 switch (instruction->prefixes[PPS_VEX]) {
2194 case P_EVEX:
2195 if (!itemp_has(itemp, IF_EVEX))
2196 return MERR_ENCMISMATCH;
2197 break;
2198 case P_VEX3:
2199 case P_VEX2:
2200 if (!itemp_has(itemp, IF_VEX))
2201 return MERR_ENCMISMATCH;
2202 break;
2203 default:
2204 break;
2208 * Check that no spurious colons or TOs are present
2210 for (i = 0; i < itemp->operands; i++)
2211 if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
2212 return MERR_INVALOP;
2215 * Process size flags
2217 switch (itemp_smask(itemp)) {
2218 case IF_GENBIT(IF_SB):
2219 asize = BITS8;
2220 break;
2221 case IF_GENBIT(IF_SW):
2222 asize = BITS16;
2223 break;
2224 case IF_GENBIT(IF_SD):
2225 asize = BITS32;
2226 break;
2227 case IF_GENBIT(IF_SQ):
2228 asize = BITS64;
2229 break;
2230 case IF_GENBIT(IF_SO):
2231 asize = BITS128;
2232 break;
2233 case IF_GENBIT(IF_SY):
2234 asize = BITS256;
2235 break;
2236 case IF_GENBIT(IF_SZ):
2237 asize = BITS512;
2238 break;
2239 case IF_GENBIT(IF_SIZE):
2240 switch (bits) {
2241 case 16:
2242 asize = BITS16;
2243 break;
2244 case 32:
2245 asize = BITS32;
2246 break;
2247 case 64:
2248 asize = BITS64;
2249 break;
2250 default:
2251 asize = 0;
2252 break;
2254 break;
2255 default:
2256 asize = 0;
2257 break;
2260 if (itemp_armask(itemp)) {
2261 /* S- flags only apply to a specific operand */
2262 i = itemp_arg(itemp);
2263 memset(size, 0, sizeof size);
2264 size[i] = asize;
2265 } else {
2266 /* S- flags apply to all operands */
2267 for (i = 0; i < MAX_OPERANDS; i++)
2268 size[i] = asize;
 2272      * Check that the operand flags all match up.
 2273      * It's a bit tricky, so let's be verbose:
 2275      * 1) Find out the size of the operand. If the instruction
 2276      *    doesn't have one specified, we try to guess it either
 2277      *    from the template (IF_S* flag) or
 2278      *    from the code bits.
 2280      * 2) If the template operand does not match the instruction, OR
 2281      *    the template has an operand size specified AND this size differs
 2282      *    from the one the instruction has (perhaps derived from code bits),
 2283      *    then we:
 2284      *    a) Check that only the sizes of the instruction and operand
 2285      *       differ; the other characteristics must match.
 2286      *    b) If it is a register specified in the instruction,
 2287      *       just mark that operand as "size
 2288      *       missing"; this turns on the fuzzy operand size
 2289      *       logic facility (handled by the caller).
2291 for (i = 0; i < itemp->operands; i++) {
2292 opflags_t type = instruction->oprs[i].type;
2293 decoflags_t deco = instruction->oprs[i].decoflags;
2294 decoflags_t ideco = itemp->deco[i];
2295 bool is_broadcast = deco & BRDCAST_MASK;
2296 uint8_t brcast_num = 0;
2297 opflags_t template_opsize, insn_opsize;
2299 if (!(type & SIZE_MASK))
2300 type |= size[i];
2302 insn_opsize = type & SIZE_MASK;
2303 if (!is_broadcast) {
2304 template_opsize = itemp->opd[i] & SIZE_MASK;
2305 } else {
2306 decoflags_t deco_brsize = ideco & BRSIZE_MASK;
2308 if (~ideco & BRDCAST_MASK)
2309 return MERR_BRNOTHERE;
2312 * When broadcasting, the element size depends on
2313 * the instruction type; the decorator flag must match it.
2315 if (deco_brsize) {
2316 template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
2317 /* calculate the proper broadcast count: {1to<brcast_num>} */
2318 brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
2319 } else {
2320 template_opsize = 0;
2324 if (~ideco & deco & OPMASK_MASK)
2325 return MERR_MASKNOTHERE;
2327 if (itemp->opd[i] & ~type & ~SIZE_MASK) {
2328 return MERR_INVALOP;
2329 } else if (template_opsize) {
2330 if (template_opsize != insn_opsize) {
2331 if (insn_opsize) {
2332 return MERR_INVALOP;
2333 } else if (!is_class(REGISTER, type)) {
2335 * Note: we don't honor extrinsic operand sizes for registers,
2336 * so "missing operand size" for a register should be
2337 * considered a wildcard match rather than an error.
2339 opsizemissing = true;
2341 } else if (is_broadcast &&
2342 (brcast_num !=
2343 (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
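/*
 * The {1to<n>} decorator's BRNUM field is compared here as
 * 2U << field, i.e. it encodes n as log2(n) - 1.
 */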
2345 * The broadcast operand size matches, but the number of repeated
2346 * memory elements does not.
2347 * E.g. if a 64-bit double-precision float is broadcast to a ymm
2348 * register (256 bits), the broadcasting decorator must be {1to4}.
2350 return MERR_BRNUMMISMATCH;
2355 if (opsizemissing)
2356 return MERR_OPSIZEMISSING;
2359 * Check operand sizes
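 * (IF_SM / IF_SM2 request size matching between operands: the first
 *  explicit size found in the template is propagated as the expected
 *  size of the first "oprs" operands -- only the first two for IF_SM2.)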
2361 if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
2362 oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
2363 for (i = 0; i < oprs; i++) {
2364 asize = itemp->opd[i] & SIZE_MASK;
2365 if (asize) {
2366 for (i = 0; i < oprs; i++)
2367 size[i] = asize;
2368 break;
2371 } else {
2372 oprs = itemp->operands;
2375 for (i = 0; i < itemp->operands; i++) {
2376 if (!(itemp->opd[i] & SIZE_MASK) &&
2377 (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
2378 return MERR_OPSIZEMISMATCH;
2382 * Check template is okay at the set cpu level
2384 if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
2385 return MERR_BADCPU;
2388 * Verify the appropriate long mode flag.
2390 if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
2391 return MERR_BADMODE;
2394 * If we have a HLE prefix, look for the NOHLE flag
2396 if (itemp_has(itemp, IF_NOHLE) &&
2397 (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
2398 has_prefix(instruction, PPS_REP, P_XRELEASE)))
2399 return MERR_BADHLE;
2402 * Check if special handling needed for Jumps
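 * (A template whose first bytecode is 0370 or 0371 -- the low bit is
 *  masked off below -- is reported as MOK_JUMP so that the caller can
 *  validate and pick the final form via jmp_match().)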
2404 if ((itemp->code[0] & ~1) == 0370)
2405 return MOK_JUMP;
2408 * Check if the BND prefix is allowed.
2409 * Any other 0xF2 prefix (REPNE/REPNZ) is prohibited.
2411 if (!itemp_has(itemp, IF_BND) &&
2412 (has_prefix(instruction, PPS_REP, P_BND) ||
2413 has_prefix(instruction, PPS_REP, P_NOBND)))
2414 return MERR_BADBND;
2415 else if (itemp_has(itemp, IF_BND) &&
2416 (has_prefix(instruction, PPS_REP, P_REPNE) ||
2417 has_prefix(instruction, PPS_REP, P_REPNZ)))
2418 return MERR_BADREPNE;
2420 return MOK_GOOD;
2424 * Check whether ModR/M.mod should/can be 01 (byte displacement):
2425 * no word/dword offset is forced (EAF_WORDOFFS), and either
2426 * - EAF_BYTEOFFS is set or the offset fits in a byte, when EVEX is not used, or
2427 * - the offset can be compressed (disp8*N) when EVEX is used.
2429 #define IS_MOD_01() (!(input->eaflags & EAF_WORDOFFS) && \
2430 (ins->rex & REX_EV ? seg == NO_SEG && !forw_ref && \
2431 is_disp8n(input, ins, &output->disp8) : \
2432 input->eaflags & EAF_BYTEOFFS || (o >= -128 && \
2433 o <= 127 && seg == NO_SEG && !forw_ref)))
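/*
 * Note: IS_MOD_01() is only meaningful inside process_ea() below; it
 * relies on the parameters input, ins and output and on the locals
 * o, seg and forw_ref being in scope at each point of use.
 */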
2435 static enum ea_type process_ea(operand *input, ea *output, int bits,
2436 int rfield, opflags_t rflags, insn *ins,
2437 const char **errmsg)
2439 bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
2440 int addrbits = ins->addr_size;
2441 int eaflags = input->eaflags;
2443 *errmsg = "invalid effective address"; /* Default error message */
2445 output->type = EA_SCALAR;
2446 output->rip = false;
2447 output->disp8 = 0;
2449 /* REX flags for the rfield operand */
2450 output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
2451 /* EVEX.R' flag for the REG operand */
2452 ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);
2454 if (is_class(REGISTER, input->type)) {
2456 * It's a direct register.
2458 if (!is_register(input->basereg))
2459 goto err;
2461 if (!is_reg_class(REG_EA, input->basereg))
2462 goto err;
2464 /* broadcasting is not available with a direct register operand. */
2465 if (input->decoflags & BRDCAST_MASK) {
2466 *errmsg = "broadcast not allowed with register operand";
2467 goto err;
2470 output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
2471 ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
2472 output->sib_present = false; /* no SIB necessary */
2473 output->bytes = 0; /* no offset necessary either */
2474 output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
2475 } else {
2477 * It's a memory reference.
2480 /* Embedded rounding or SAE is not available with a mem ref operand. */
2481 if (input->decoflags & (ER | SAE)) {
2482 *errmsg = "embedded rounding is available only with "
2483 "register-register operations";
2484 goto err;
2487 if (input->basereg == -1 &&
2488 (input->indexreg == -1 || input->scale == 0)) {
2490 * It's a pure offset.
2492 if (bits == 64 && ((input->type & IP_REL) == IP_REL)) {
2493 if (input->segment == NO_SEG ||
2494 (input->opflags & OPFLAG_RELATIVE)) {
2495 nasm_error(ERR_WARNING | ERR_PASS2,
2496 "absolute address can not be RIP-relative");
2497 input->type &= ~IP_REL;
2498 input->type |= MEMORY;
2502 if (bits == 64 &&
2503 !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
2504 *errmsg = "RIP-relative addressing is prohibited for MIB";
2505 goto err;
2508 if (eaflags & EAF_BYTEOFFS ||
2509 (eaflags & EAF_WORDOFFS &&
2510 input->disp_size != (addrbits != 16 ? 32 : 16))) {
2511 nasm_error(ERR_WARNING | ERR_PASS1,
2512 "displacement size ignored on absolute address");
2515 if (bits == 64 && (~input->type & IP_REL)) {
2516 output->sib_present = true;
2517 output->sib = GEN_SIB(0, 4, 5);
2518 output->bytes = 4;
2519 output->modrm = GEN_MODRM(0, rfield, 4);
2520 output->rip = false;
2521 } else {
2522 output->sib_present = false;
2523 output->bytes = (addrbits != 16 ? 4 : 2);
2524 output->modrm = GEN_MODRM(0, rfield,
2525 (addrbits != 16 ? 5 : 6));
2526 output->rip = bits == 64;
2528 } else {
2530 * It's an indirection.
2532 int i = input->indexreg, b = input->basereg, s = input->scale;
2533 int32_t seg = input->segment;
2534 int hb = input->hintbase, ht = input->hinttype;
2535 int t, it, bt; /* register numbers */
2536 opflags_t x, ix, bx; /* register flags */
2538 if (s == 0)
2539 i = -1; /* make this easy, at least */
2541 if (is_register(i)) {
2542 it = nasm_regvals[i];
2543 ix = nasm_reg_flags[i];
2544 } else {
2545 it = -1;
2546 ix = 0;
2549 if (is_register(b)) {
2550 bt = nasm_regvals[b];
2551 bx = nasm_reg_flags[b];
2552 } else {
2553 bt = -1;
2554 bx = 0;
2557 /* if either one is a vector register... */
2558 if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
2559 opflags_t sok = BITS32 | BITS64;
2560 int32_t o = input->offset;
2561 int mod, scale, index, base;
2564 * For a vector SIB, one has to be a vector and the other,
2565 * if present, a GPR. The vector must be the index operand.
2567 if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
2568 if (s == 0)
2569 s = 1;
2570 else if (s != 1)
2571 goto err;
2573 t = bt, bt = it, it = t;
2574 x = bx, bx = ix, ix = x;
2577 if (bt != -1) {
2578 if (REG_GPR & ~bx)
2579 goto err;
2580 if (!(REG64 & ~bx) || !(REG32 & ~bx))
2581 sok &= bx;
2582 else
2583 goto err;
2587 * While we're here, ensure the user didn't specify
2588 * WORD or QWORD
2590 if (input->disp_size == 16 || input->disp_size == 64)
2591 goto err;
2593 if (addrbits == 16 ||
2594 (addrbits == 32 && !(sok & BITS32)) ||
2595 (addrbits == 64 && !(sok & BITS64)))
2596 goto err;
2598 output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
2599 : ((ix & YMMREG & ~REG_EA)
2600 ? EA_YMMVSIB : EA_XMMVSIB));
2602 output->rex |= rexflags(it, ix, REX_X);
2603 output->rex |= rexflags(bt, bx, REX_B);
2604 ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);
2606 index = it & 7; /* it is known to be != -1 */
2608 switch (s) {
2609 case 1:
2610 scale = 0;
2611 break;
2612 case 2:
2613 scale = 1;
2614 break;
2615 case 4:
2616 scale = 2;
2617 break;
2618 case 8:
2619 scale = 3;
2620 break;
2621 default: /* then what the smeg is it? */
2622 goto err; /* panic */
2625 if (bt == -1) {
2626 base = 5;
2627 mod = 0;
2628 } else {
2629 base = (bt & 7);
2630 if (base != REG_NUM_EBP && o == 0 &&
2631 seg == NO_SEG && !forw_ref &&
2632 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2633 mod = 0;
2634 else if (IS_MOD_01())
2635 mod = 1;
2636 else
2637 mod = 2;
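/*
 * Note: for mod 0 or 1 the mod value itself equals the number of
 * displacement bytes (0 or 1); no base register (base = 5, mod = 0)
 * or mod = 2 means a 4-byte displacement.
 */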
2640 output->sib_present = true;
2641 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2642 output->modrm = GEN_MODRM(mod, rfield, 4);
2643 output->sib = GEN_SIB(scale, index, base);
2644 } else if ((ix|bx) & (BITS32|BITS64)) {
2646 * It must be a 32/64-bit memory reference. First we have
2647 * to check that all registers involved are of type E/Rxx.
2649 opflags_t sok = BITS32 | BITS64;
2650 int32_t o = input->offset;
2652 if (it != -1) {
2653 if (!(REG64 & ~ix) || !(REG32 & ~ix))
2654 sok &= ix;
2655 else
2656 goto err;
2659 if (bt != -1) {
2660 if (REG_GPR & ~bx)
2661 goto err; /* Invalid register */
2662 if (~sok & bx & SIZE_MASK)
2663 goto err; /* Invalid size */
2664 sok &= bx;
2668 * While we're here, ensure the user didn't specify
2669 * WORD or QWORD
2671 if (input->disp_size == 16 || input->disp_size == 64)
2672 goto err;
2674 if (addrbits == 16 ||
2675 (addrbits == 32 && !(sok & BITS32)) ||
2676 (addrbits == 64 && !(sok & BITS64)))
2677 goto err;
2679 /* now reorganize base/index */
2680 if (s == 1 && bt != it && bt != -1 && it != -1 &&
2681 ((hb == b && ht == EAH_NOTBASE) ||
2682 (hb == i && ht == EAH_MAKEBASE))) {
2683 /* swap if hints say so */
2684 t = bt, bt = it, it = t;
2685 x = bx, bx = ix, ix = x;
2688 if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
2689 /* make single reg base, unless hint */
2690 bt = it, bx = ix, it = -1, ix = 0;
2692 if (eaflags & EAF_MIB) {
2693 /* only for mib operands */
2694 if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
2696 * make a single reg index [reg*1].
2697 * gas uses this form for an explicit index register.
2699 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2701 if ((ht == EAH_SUMMED) && bt == -1) {
2702 /* separate once summed index into [base, index] */
2703 bt = it, bx = ix, s--;
2705 } else {
2706 if (((s == 2 && it != REG_NUM_ESP &&
2707 (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
2708 s == 3 || s == 5 || s == 9) && bt == -1) {
2709 /* convert 3*EAX to EAX+2*EAX */
2710 bt = it, bx = ix, s--;
2712 if (it == -1 && (bt & 7) != REG_NUM_ESP &&
2713 (eaflags & EAF_TIMESTWO) &&
2714 (hb == b && ht == EAH_NOTBASE)) {
2716 * convert [NOSPLIT EAX*1]
2717 * to sib format with 0x0 displacement - [EAX*1+0].
2719 it = bt, ix = bx, bt = -1, bx = 0, s = 1;
2722 if (s == 1 && it == REG_NUM_ESP) {
2723 /* swap ESP into base if scale is 1 */
2724 t = it, it = bt, bt = t;
2725 x = ix, ix = bx, bx = x;
2727 if (it == REG_NUM_ESP ||
2728 (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
2729 goto err; /* wrong, for various reasons */
2731 output->rex |= rexflags(it, ix, REX_X);
2732 output->rex |= rexflags(bt, bx, REX_B);
2734 if (it == -1 && (bt & 7) != REG_NUM_ESP) {
2735 /* no SIB needed */
2736 int mod, rm;
2738 if (bt == -1) {
2739 rm = 5;
2740 mod = 0;
2741 } else {
2742 rm = (bt & 7);
2743 if (rm != REG_NUM_EBP && o == 0 &&
2744 seg == NO_SEG && !forw_ref &&
2745 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2746 mod = 0;
2747 else if (IS_MOD_01())
2748 mod = 1;
2749 else
2750 mod = 2;
2753 output->sib_present = false;
2754 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2755 output->modrm = GEN_MODRM(mod, rfield, rm);
2756 } else {
2757 /* we need a SIB */
2758 int mod, scale, index, base;
2760 if (it == -1)
2761 index = 4, s = 1;
2762 else
2763 index = (it & 7);
2765 switch (s) {
2766 case 1:
2767 scale = 0;
2768 break;
2769 case 2:
2770 scale = 1;
2771 break;
2772 case 4:
2773 scale = 2;
2774 break;
2775 case 8:
2776 scale = 3;
2777 break;
2778 default: /* then what the smeg is it? */
2779 goto err; /* panic */
2782 if (bt == -1) {
2783 base = 5;
2784 mod = 0;
2785 } else {
2786 base = (bt & 7);
2787 if (base != REG_NUM_EBP && o == 0 &&
2788 seg == NO_SEG && !forw_ref &&
2789 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2790 mod = 0;
2791 else if (IS_MOD_01())
2792 mod = 1;
2793 else
2794 mod = 2;
2797 output->sib_present = true;
2798 output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
2799 output->modrm = GEN_MODRM(mod, rfield, 4);
2800 output->sib = GEN_SIB(scale, index, base);
2802 } else { /* it's 16-bit */
2803 int mod, rm;
2804 int16_t o = input->offset;
2806 /* check for 64-bit long mode */
2807 if (addrbits == 64)
2808 goto err;
2810 /* check all registers are BX, BP, SI or DI */
2811 if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
2812 (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
2813 goto err;
2815 /* ensure the user didn't specify DWORD/QWORD */
2816 if (input->disp_size == 32 || input->disp_size == 64)
2817 goto err;
2819 if (s != 1 && i != -1)
2820 goto err; /* no can do, in 16-bit EA */
2821 if (b == -1 && i != -1) {
2822 int tmp = b;
2823 b = i;
2824 i = tmp;
2825 } /* swap */
2826 if ((b == R_SI || b == R_DI) && i != -1) {
2827 int tmp = b;
2828 b = i;
2829 i = tmp;
2831 /* now we have BX/BP as base and SI/DI as index */
2832 if (b == i)
2833 goto err; /* shouldn't ever happen, in theory */
2834 if (i != -1 && b != -1 &&
2835 (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
2836 goto err; /* invalid combinations */
2837 if (b == -1) /* pure offset: handled above */
2838 goto err; /* so if it gets to here, panic! */
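/*
 * In a 16-bit EA, ModRM.rm = 0..7 selects one of the fixed combinations
 * [BX+SI], [BX+DI], [BP+SI], [BP+DI], [SI], [DI], [BP], [BX];
 * the switches below map our base/index pair onto that table.
 */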
2840 rm = -1;
2841 if (i != -1)
2842 switch (i * 256 + b) {
2843 case R_SI * 256 + R_BX:
2844 rm = 0;
2845 break;
2846 case R_DI * 256 + R_BX:
2847 rm = 1;
2848 break;
2849 case R_SI * 256 + R_BP:
2850 rm = 2;
2851 break;
2852 case R_DI * 256 + R_BP:
2853 rm = 3;
2854 break;
2855 } else
2856 switch (b) {
2857 case R_SI:
2858 rm = 4;
2859 break;
2860 case R_DI:
2861 rm = 5;
2862 break;
2863 case R_BP:
2864 rm = 6;
2865 break;
2866 case R_BX:
2867 rm = 7;
2868 break;
2870 if (rm == -1) /* can't happen, in theory */
2871 goto err; /* so panic if it does */
2873 if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
2874 !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
2875 mod = 0;
2876 else if (IS_MOD_01())
2877 mod = 1;
2878 else
2879 mod = 2;
2881 output->sib_present = false; /* no SIB - it's 16-bit */
2882 output->bytes = mod; /* bytes of offset needed */
2883 output->modrm = GEN_MODRM(mod, rfield, rm);
2888 output->size = 1 + output->sib_present + output->bytes;
2889 return output->type;
2891 err:
2892 return output->type = EA_INVALID;
2895 static void add_asp(insn *ins, int addrbits)
2897 int j, valid;
2898 int defdisp;
2900 valid = (addrbits == 64) ? 64|32 : 32|16;
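/*
 * "valid" is a bitmask of the address sizes (16/32/64) this instruction
 * could still be encoded with; the prefix check and the register and
 * displacement checks below progressively narrow it down.
 */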
2902 switch (ins->prefixes[PPS_ASIZE]) {
2903 case P_A16:
2904 valid &= 16;
2905 break;
2906 case P_A32:
2907 valid &= 32;
2908 break;
2909 case P_A64:
2910 valid &= 64;
2911 break;
2912 case P_ASP:
2913 valid &= (addrbits == 32) ? 16 : 32;
2914 break;
2915 default:
2916 break;
2919 for (j = 0; j < ins->operands; j++) {
2920 if (is_class(MEMORY, ins->oprs[j].type)) {
2921 opflags_t i, b;
2923 /* get the index register flags, if it is a valid register */
2924 if (!is_register(ins->oprs[j].indexreg))
2925 i = 0;
2926 else
2927 i = nasm_reg_flags[ins->oprs[j].indexreg];
2929 /* get the base register flags, if it is a valid register */
2930 if (!is_register(ins->oprs[j].basereg))
2931 b = 0;
2932 else
2933 b = nasm_reg_flags[ins->oprs[j].basereg];
2935 if (ins->oprs[j].scale == 0)
2936 i = 0;
2938 if (!i && !b) {
2939 int ds = ins->oprs[j].disp_size;
2940 if ((addrbits != 64 && ds > 8) ||
2941 (addrbits == 64 && ds == 16))
2942 valid &= ds;
2943 } else {
2944 if (!(REG16 & ~b))
2945 valid &= 16;
2946 if (!(REG32 & ~b))
2947 valid &= 32;
2948 if (!(REG64 & ~b))
2949 valid &= 64;
2951 if (!(REG16 & ~i))
2952 valid &= 16;
2953 if (!(REG32 & ~i))
2954 valid &= 32;
2955 if (!(REG64 & ~i))
2956 valid &= 64;
2961 if (valid & addrbits) {
2962 ins->addr_size = addrbits;
2963 } else if (valid & ((addrbits == 32) ? 16 : 32)) {
2964 /* Add an address size prefix */
2965 ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
2966 ins->addr_size = (addrbits == 32) ? 16 : 32;
2967 } else {
2968 /* Impossible... */
2969 nasm_error(ERR_NONFATAL, "impossible combination of address sizes");
2970 ins->addr_size = addrbits; /* Error recovery */
2973 defdisp = ins->addr_size == 16 ? 16 : 32;
2975 for (j = 0; j < ins->operands; j++) {
2976 if (!(MEM_OFFS & ~ins->oprs[j].type) &&
2977 (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
2979 * mem_offs sizes must match the address size; if not,
2980 * strip the MEM_OFFS bit and match only EA instructions
2982 ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);